gcc/simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"

/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? NONZERO_BASE_PLUS_P needs to move into a header file so that its
   definition can be shared with the simplification routines here.  Until
   then, do not change this macro without also changing the copy in cse.c.  */

/* Allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)                                      \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx        \
   || (X) == virtual_stack_vars_rtx                                 \
   || (X) == virtual_incoming_args_rtx                              \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT  \
       && (XEXP (X, 0) == frame_pointer_rtx                         \
           || XEXP (X, 0) == hard_frame_pointer_rtx                 \
           || ((X) == arg_pointer_rtx                               \
               && fixed_regs[ARG_POINTER_REGNUM])                   \
           || XEXP (X, 0) == virtual_stack_vars_rtx                 \
           || XEXP (X, 0) == virtual_incoming_args_rtx))            \
   || (X) == stack_pointer_rtx                                      \
   || (X) == virtual_stack_dynamic_rtx                              \
   || (X) == virtual_outgoing_args_rtx                              \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT  \
       && (XEXP (X, 0) == stack_pointer_rtx                         \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx              \
           || XEXP (X, 0) == virtual_outgoing_args_rtx))            \
   || GET_CODE (X) == ADDRESSOF)
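
/* Illustrative note (added; not in the original sources): under this
   definition an address such as (plus frame_pointer_rtx (const_int 8)) or
   a bare virtual_stack_vars_rtx counts as provably nonzero, while a lone
   (symbol_ref "foo") does not, since a weak symbol may resolve to address
   zero; compare the EQ/NE cases in simplify_relational_operation below.  */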

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
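
/* Worked example (added for illustration, assuming a 64-bit
   HOST_WIDE_INT): for low = 0x8000000000000000 the cast to
   HOST_WIDE_INT is negative, so HWI_SIGN_EXTEND yields -1 for the high
   word; for low = 1 it yields 0.  The (low, high) pair thus reads as
   the value a signed double-width integer would hold.  */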

static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
                                                    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx,
                                        rtx, int));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
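
/* Usage sketch (added for illustration): simplify_gen_binary (PLUS,
   SImode, x, const0_rtx) hands back X itself via the fold above, while
   a sum that does not fold comes back as (plus:SI x y) from
   gen_rtx_fmt_ee.  Unlike simplify_binary_operation, this never
   returns null, so callers need not check.  */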

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
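
/* Example (added for illustration): if X is a MEM whose address is the
   constant-pool symbol_ref for an SFmode 1.0, the CONST_DOUBLE for 1.0
   is returned; if X instead reads the entry in an incompatible mode and
   simplify_subreg cannot reconcile the two, X is returned unchanged.  */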

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        {
          code = new;
          mode = cmp_mode;
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
        }
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                      ? GET_MODE (op0)
                                      : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));

      if (REG_P (x) && REG_P (old) && REGNO (x) == REGNO (old))
        return new;

      return x;

    default:
      return x;
    }
  return x;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get the low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
          else
            lv = exact_log2 (l1 & (-l1)) + 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          /* We don't attempt to optimize this.  */
          return 0;

        case ABS:             d = REAL_VALUE_ABS (d);            break;
        case NEG:             d = REAL_VALUE_NEGATE (d);         break;
        case FLOAT_TRUNCATE:  d = real_value_truncate (mode, d); break;
        case FLOAT_EXTEND:    /* All this does is change the mode.  */ break;
        case FIX:             d = REAL_VALUE_RNDZINT (d);        break;
        case UNSIGNED_FIX:    d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:           i = REAL_VALUE_FIX (d);           break;
        case UNSIGNED_FIX:  i = REAL_VALUE_UNSIGNED_FIX (d);  break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
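
/* Example of the CONST_INT path above (added for illustration):
   (zero_extend:SI (const_int -1)) with OP_MODE QImode masks ARG0 down
   to the eight low bits and folds to (const_int 255), whereas
   (sign_extend:SI (const_int 255)) from QImode folds to (const_int -1)
   after the sign-bit adjustment.  */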

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
          && !MODE_HAS_INFINITIES (mode)
          && REAL_VALUES_EQUAL (f1, dconst0))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* ... fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and non-zero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }

          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and non-zero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for nans.
             However, ANSI says we can drop signals,
             so we can do this anyway.  */
          if (trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return gen_rtx_NEG (mode, op0);
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case ASHIFTRT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          abort ();
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If the shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* The bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure the bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
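
/* Example of the constant path above (added for illustration): for
   (ashiftrt:QI (const_int -16) (const_int 2)) the sign-extended ARG0S
   is -16, the arithmetic shift gives -4, and trunc_int_for_mode leaves
   (const_int -4) in QImode.  */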

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
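
/* Worked example (added for illustration): given
   (minus (plus x (const_int 4)) (minus y (const_int 6))), the expansion
   loop below flattens the input to the signed operand list
   { +x, +4, -y, +6 }, the pairwise pass folds the two constants to +10,
   and the rebuild emits roughly (plus (minus x y) (const_int 10)),
   subject to the operand sort.  */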

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
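
/* Example of the VOIDmode "infinite precision" case (added for
   illustration): comparing (const_int -1) with (const_int 0), GT folds
   to const0_rtx, while GTU folds to const_true_rtx, because -1 is the
   maximal value under the unsigned interpretation.  */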
1844 simplify_relational_operation (code, mode, op0, op1)
1845 enum rtx_code code;
1846 enum machine_mode mode;
1847 rtx op0, op1;
1849 int equal, op0lt, op0ltu, op1lt, op1ltu;
1850 rtx tem;
1851 rtx trueop0;
1852 rtx trueop1;
1854 if (mode == VOIDmode
1855 && (GET_MODE (op0) != VOIDmode
1856 || GET_MODE (op1) != VOIDmode))
1857 abort ();
1859 /* If op0 is a compare, extract the comparison arguments from it. */
1860 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1861 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1863 trueop0 = avoid_constant_pool_reference (op0);
1864 trueop1 = avoid_constant_pool_reference (op1);
1866 /* We can't simplify MODE_CC values since we don't know what the
1867 actual comparison is. */
1868 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1869 #ifdef HAVE_cc0
1870 || op0 == cc0_rtx
1871 #endif
1873 return 0;
1875 /* Make sure the constant is second. */
1876 if (swap_commutative_operands_p (trueop0, trueop1))
1878 tem = op0, op0 = op1, op1 = tem;
1879 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1880 code = swap_condition (code);
1883 /* For integer comparisons of A and B maybe we can simplify A - B and can
1884 then simplify a comparison of that with zero. If A and B are both either
1885 a register or a CONST_INT, this can't help; testing for these cases will
1886 prevent infinite recursion here and speed things up.
1888 If CODE is an unsigned comparison, then we can never do this optimization,
1889 because it gives an incorrect result if the subtraction wraps around zero.
1890 ANSI C defines unsigned operations such that they never overflow, and
1891 thus such cases can not be ignored. */
1893 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1894 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1895 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1896 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1897 && code != GTU && code != GEU && code != LTU && code != LEU)
1898 return simplify_relational_operation (signed_condition (code),
1899 mode, tem, const0_rtx);
1901 if (flag_unsafe_math_optimizations && code == ORDERED)
1902 return const_true_rtx;
1904 if (flag_unsafe_math_optimizations && code == UNORDERED)
1905 return const0_rtx;
1907 /* For modes without NaNs, if the two operands are equal, we know the
1908 result. */
1909 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1910 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1912 /* If the operands are floating-point constants, see if we can fold
1913 the result. */
1914 else if (GET_CODE (trueop0) == CONST_DOUBLE
1915 && GET_CODE (trueop1) == CONST_DOUBLE
1916 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1918 REAL_VALUE_TYPE d0, d1;
1920 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1921 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1923 /* Comparisons are unordered iff at least one of the values is NaN. */
1924 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1925 switch (code)
1927 case UNEQ:
1928 case UNLT:
1929 case UNGT:
1930 case UNLE:
1931 case UNGE:
1932 case NE:
1933 case UNORDERED:
1934 return const_true_rtx;
1935 case EQ:
1936 case LT:
1937 case GT:
1938 case LE:
1939 case GE:
1940 case LTGT:
1941 case ORDERED:
1942 return const0_rtx;
1943 default:
1944 return 0;
1947 equal = REAL_VALUES_EQUAL (d0, d1);
1948 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1949 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1952 /* Otherwise, see if the operands are both integers. */
1953 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1954 && (GET_CODE (trueop0) == CONST_DOUBLE
1955 || GET_CODE (trueop0) == CONST_INT)
1956 && (GET_CODE (trueop1) == CONST_DOUBLE
1957 || GET_CODE (trueop1) == CONST_INT))
1959 int width = GET_MODE_BITSIZE (mode);
1960 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1961 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1963 /* Get the two words comprising each integer constant. */
1964 if (GET_CODE (trueop0) == CONST_DOUBLE)
1966 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1967 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1969 else
1971 l0u = l0s = INTVAL (trueop0);
1972 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1975 if (GET_CODE (trueop1) == CONST_DOUBLE)
1977 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1978 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1980 else
1982 l1u = l1s = INTVAL (trueop1);
1983 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1986 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1987 we have to sign or zero-extend the values. */
1988 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1990 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1991 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1993 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1994 l0s |= ((HOST_WIDE_INT) (-1) << width);
1996 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1997 l1s |= ((HOST_WIDE_INT) (-1) << width);
1999 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2000 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2002 equal = (h0u == h1u && l0u == l1u);
2003 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2004 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2005 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2006 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          /* References to the frame plus a constant or labels cannot
             be zero, but a SYMBOL_REF can due to #pragma weak.  */
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              /* On some machines, the ap reg can be 0 sometimes.  */
              && op0 != arg_pointer_rtx
#endif
              )
            return const0_rtx;
          break;

        case NE:
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && op0 != arg_pointer_rtx
#endif
              )
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;
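        /* The four unsigned cases above rely only on the range of the
           mode: e.g. (geu x (const_int 0)) is always const_true_rtx and
           (ltu x (const_int 0)) is always const0_rtx, whatever x is,
           while LEU and GTU need the constant to match the mode's
           all-ones mask.  */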
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const1_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
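      /* A worked example of the extraction above, with !BITS_BIG_ENDIAN:
         a 4-bit field at position 0 of (const_int 90) (binary 1011010)
         is 1010, so ZERO_EXTRACT folds to (const_int 10), and
         SIGN_EXTRACT, which propagates bit 3, folds to (const_int -6).  */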
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
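      /* E.g. (if_then_else (ne a b) a b) reduces to plain A above:
         when a != b the first arm is chosen, and when a == b the
         second arm, B, has the same value as A anyway.  */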
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            op0 = temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
        {
          elt = CONST_VECTOR_ELT (op, offset);

          /* ?? We probably don't need this copy_rtx because constants
             can be shared.  ?? */

          return copy_rtx (elt);
        }
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
               && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
        {
          return (gen_rtx_CONST_VECTOR
                  (outermode,
                   gen_rtvec_v (GET_MODE_NUNITS (outermode),
                                &CONST_VECTOR_ELT (op, offset))));
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (GET_MODE_SIZE (outermode) % elt_size == 0))
        {
          /* This happens when the target register size is smaller than
             the vector mode, and we synthesize operations with vectors
             of elements that are smaller than the register size.  */
          HOST_WIDE_INT sum = 0, high = 0;
          unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
          unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
          unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
          int shift = BITS_PER_UNIT * elt_size;

          for (; n_elts--; i += step)
            {
              elt = CONST_VECTOR_ELT (op, i);
              if (GET_CODE (elt) == CONST_DOUBLE
                  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
                {
                  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
                                            elt);
                  if (! elt)
                    return NULL_RTX;
                }
              if (GET_CODE (elt) != CONST_INT)
                return NULL_RTX;
              high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
              sum = (sum << shift) + INTVAL (elt);
            }
          if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
            return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
            return immed_double_const (high, sum, outermode);
          else
            return NULL_RTX;
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (elt_size % GET_MODE_SIZE (outermode) == 0))
        {
          enum machine_mode new_mode
            = int_mode_for_mode (GET_MODE_INNER (innermode));
          int subbyte = byte % elt_size;

          op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
          if (! op)
            return NULL_RTX;
          return simplify_subreg (outermode, op, new_mode, subbyte);
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
        /* This shouldn't happen, but let's not do anything stupid.  */
        return NULL_RTX;
    }
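  /* As an example of the element-packing loop above, on a little-endian
     target (subreg:HI (const_vector:V4QI [0x12 0x34 0x56 0x78]) 0) takes
     the first two QImode elements and folds to (const_int 0x3412), the
     same value those two bytes denote when read from memory.  */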
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
          || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
        {
          /* Construct a CONST_VECTOR from individual subregs.  */
          enum machine_mode submode = GET_MODE_INNER (outermode);
          int subsize = GET_MODE_UNIT_SIZE (outermode);
          int i, elts = GET_MODE_NUNITS (outermode);
          rtvec v = rtvec_alloc (elts);
          rtx elt;

          for (i = 0; i < elts; i++, byte += subsize)
            {
              /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
              /* ??? It would be nice if we could actually make such subregs
                 on targets that allow such relocations.  */
              elt = simplify_subreg (submode, op, innermode, byte);
              if (! elt)
                return NULL_RTX;
              RTVEC_ELT (v, i) = elt;
            }

          return gen_rtx_CONST_VECTOR (outermode, v);
        }
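      /* For instance, on a 64-bit host, where a DImode constant is a
         plain CONST_INT, the loop above can split it into
         (const_vector:V2SI [low high]), each element coming from a
         word-sized subreg of the constant, in whatever order the
         target's endianness dictates.  */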
      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
          && GET_CODE (op) != CONST_VECTOR)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* A similar comment applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      if (GET_MODE_CLASS (outermode) != MODE_INT
          && GET_MODE_CLASS (outermode) != MODE_CC)
        {
          enum machine_mode new_mode = int_mode_for_mode (outermode);

          if (new_mode != innermode || byte != 0)
            {
              op = simplify_subreg (new_mode, op, innermode, byte);
              if (! op)
                return NULL_RTX;
              return simplify_subreg (outermode, op, new_mode, 0);
            }
        }
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
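  /* A small example of the CONST_INT extraction above: with
     !BYTES_BIG_ENDIAN and !WORDS_BIG_ENDIAN,
     (subreg:QI (const_int 0x1234) 0) selects the low byte and folds
     to (const_int 0x34), while byte 1 folds to (const_int 0x12).  */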
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0.  On big endian machines,
         this value should be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
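  /* E.g. (subreg:QI (subreg:HI (reg:SI x) 0) 0) collapses here to
     (subreg:QI (reg:SI x) 0), and the recursive call gets a chance to
     fold that further (for a suitable hard register, all the way to
     (reg:QI x)).  */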
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
            && (TEST_HARD_REG_BIT
                (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
                 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit Sparc and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG (outermode, final_regno);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
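  /* So, on a typical little-endian target, (subreg:SI (reg:DI 0) 0)
     becomes plain (reg:SI 0) here, provided hard register 0 can hold
     SImode; being the lowpart, it also keeps its ORIGINAL_REGNO.  */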
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
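/* Compared with simplify_subreg, the function above supplies a
   fallback: e.g. simplify_gen_subreg (SImode, x, DImode, 0) yields a
   folded rtx when X is, say, a CONST_INT, and otherwise a fresh
   (subreg:SI x 0).  It returns NULL_RTX only for QUEUED operands and
   for SUBREG or VOIDmode operands that did not fold.  */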
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        {
          rtx tem;

          tem = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);
          XEXP (x, 1) = tem;
          return simplify_binary_operation (code, mode,
                                            XEXP (x, 0), XEXP (x, 1));
        }

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      return NULL;
    default:
      return NULL;
    }
}