/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
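/* Illustrative note (editor's sketch, not in the original source):
   when a CONST_INT such as -5 is widened to a (low, high) pair, the
   code below writes low = -5 and high = HWI_SIGN_EXTEND (-5) == -1,
   whereas for 5 it writes high = HWI_SIGN_EXTEND (5) == 0.  */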
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
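/* Example (editor's sketch, not in the original source): for SImode the
   most significant bit is 1 << 31, so

     mode_signbit_p (SImode, gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode))

   returns true, while any other nonzero SImode constant yields false.  */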
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
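/* Usage sketch (editor's illustration, not in the original source):

     rtx x = gen_rtx_REG (SImode, 1);
     rtx sum = simplify_gen_binary (PLUS, SImode, x, const0_rtx);

   Here simplify_binary_operation folds x + 0 back to x for integral
   modes, so SUM is X itself rather than a fresh PLUS rtx.  */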
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
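/* Usage sketch (editor's illustration, not in the original source):
   replacing (reg:SI 60) with (const_int 8) in

     (plus:SI (reg:SI 60) (const_int 4))

   rebuilds the PLUS through simplify_gen_binary, which folds the two
   constants, so the result is (const_int 12) rather than a PLUS.  */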
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (trueop)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (trueop)));
	}
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;
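	  /* Worked example (editor's note, not in the original source):
	     for arg0 = 0b101000, arg0 & -arg0 isolates the lowest set
	     bit (0b1000), exact_log2 gives 3, and adding 1 yields the
	     1-based ffs result 4.  For arg0 == 0 the AND is 0,
	     exact_log2 returns -1, and val is 0 as the comment says.  */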
	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;
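	  /* Editor's note (not in the original source): arg0 &= arg0 - 1
	     clears the lowest set bit each iteration, so the loop runs
	     once per set bit; e.g. 0b1011 -> 0b1010 -> 0b1000 -> 0,
	     giving val = 3.  PARITY below reuses the same loop and keeps
	     only the low bit of the count.  */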
	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	  }
	  break;
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }
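  /* Worked example (editor's sketch, not in the original source): for
     (fix:SI (const_double:DF 1e10)), 1e10 exceeds the SImode signed
     upper bound 0x7fffffff, so the code above saturates and returns
     (const_int 2147483647); a NaN operand folds to const0_rtx.  */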
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (COMPARISON_P (op)
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == PLUS
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && mode_signbit_p (mode, XEXP (op, 1))
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }
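	  /* Editor's note (not in the original source): the identity
	     ~(1 << X) == rotate (~1, X) holds because both values have
	     every bit set except bit X; e.g. in QImode with X = 3,
	     ~0b00001000 and 0b11111110 rotated left by 3 are both
	     0b11110111.  */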
	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && COMPARISON_P (op)
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (LSHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == LSHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (ASHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
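/* Worked example (editor's sketch, not in the original source): for
   (plus (plus x (const_int 2)) (const_int 3)), the "(a op b) op c"
   -> "a op (b op c)" attempt folds 2 + 3, so the whole expression
   becomes (plus x (const_int 5)).  */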
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (trueop0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (trueop1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = trueop0;
	  RTVEC_ELT (v, 1) = trueop1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
	  unsigned i;

	  gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  f0 = real_value_truncate (mode, f0);
	  f1 = real_value_truncate (mode, f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

	  value = real_value_truncate (mode, value);
	  return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_PLUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }
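	  /* Worked example (editor's note, not in the original source):
	     for (plus (mult x 3) (ashift x 2)), coeff0 = 3 and
	     coeff1 = 1 << 2, so the result is (mult x 7), kept only if
	     rtx_cost says it is no more expensive than the original.  */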
	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == XOR
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (REG_P (xop00) && REG_P (xop10)
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_MINUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1))
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x  */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Canonicalize XOR of the most significant bit to PLUS.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (PLUS, mode, op0, op1);
	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == PLUS
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, XEXP (op0, 1)))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	     there are no non-zero bits of C outside of X's mode.  */
	  if ((GET_CODE (op0) == SIGN_EXTEND
	       || GET_CODE (op0) == ZERO_EXTEND)
	      && GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
		  & INTVAL (trueop1)) == 0)
	    {
	      enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	      tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
					 gen_int_mode (INTVAL (trueop1),
						       imode));
	      return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	    }

	  /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	     ((A & N) + B) & M -> (A + B) & M
	     Similarly if (N & M) == 0,
	     ((A | N) + B) & M -> (A + B) & M
	     and for - instead of + and/or ^ instead of |.  */
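	  /* Worked instance of the rule above (editor's note, not in the
	     original source): with M = 0xff and N = 0xffff, (N & M) == M,
	     so (and (plus (and A 0xffff) B) 0xff) simplifies to
	     (and (plus A B) 0xff): bits of A above the mask cannot
	     influence the low eight bits of the sum.  */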
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ~INTVAL (trueop1)
	      && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	    {
	      rtx pmop[2];
	      int which;

	      pmop[0] = XEXP (op0, 0);
	      pmop[1] = XEXP (op0, 1);

	      for (which = 0; which < 2; which++)
		{
		  tem = pmop[which];
		  switch (GET_CODE (tem))
		    {
		    case AND:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			     == INTVAL (trueop1))
			pmop[which] = XEXP (tem, 0);
		      break;
		    case IOR:
		    case XOR:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
			pmop[which] = XEXP (tem, 0);
		      break;
		    default:
		      break;
		    }
		}

	      if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
		{
		  tem = simplify_gen_binary (GET_CODE (op0), mode,
					     pmop[0], pmop[1]);
		  return simplify_gen_binary (code, mode, tem, op1);
		}
	    }
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
1981 case UDIV:
1982 /* 0/x is 0 (or x&0 if x has side-effects). */
1983 if (trueop0 == const0_rtx)
1984 return side_effects_p (op1)
1985 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1986 : const0_rtx;
1987 /* x/1 is x. */
1988 if (trueop1 == const1_rtx)
1990 /* Handle narrowing UDIV. */
1991 rtx x = gen_lowpart_common (mode, op0);
1992 if (x)
1993 return x;
1994 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1995 return gen_lowpart_SUBREG (mode, op0);
1996 return op0;
1998 /* Convert divide by power of two into shift. */
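/* E.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)).  */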
1999 if (GET_CODE (trueop1) == CONST_INT
2000 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
2001 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
2002 break;
2004 case DIV:
2005 /* Handle floating point and integers separately. */
2006 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2008 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2009 safe for modes with NaNs, since 0.0 / 0.0 will then be
2010 NaN rather than 0.0. Nor is it safe for modes with signed
2011 zeros, since dividing 0 by a negative number gives -0.0. */
2012 if (trueop0 == CONST0_RTX (mode)
2013 && !HONOR_NANS (mode)
2014 && !HONOR_SIGNED_ZEROS (mode)
2015 && ! side_effects_p (op1))
2016 return op0;
2017 /* x/1.0 is x. */
2018 if (trueop1 == CONST1_RTX (mode)
2019 && !HONOR_SNANS (mode))
2020 return op0;
2022 if (GET_CODE (trueop1) == CONST_DOUBLE
2023 && trueop1 != CONST0_RTX (mode))
2025 REAL_VALUE_TYPE d;
2026 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2028 /* x/-1.0 is -x. */
2029 if (REAL_VALUES_EQUAL (d, dconstm1)
2030 && !HONOR_SNANS (mode))
2031 return simplify_gen_unary (NEG, mode, op0, mode);
2033 /* Change FP division by a constant into multiplication.
2034 Only do this with -funsafe-math-optimizations. */
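/* E.g. X/2.0 becomes X*0.5.  The reciprocal is not exact for every
   constant, which is why this requires -funsafe-math-optimizations.  */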
2035 if (flag_unsafe_math_optimizations
2036 && !REAL_VALUES_EQUAL (d, dconst0))
2038 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2039 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2040 return simplify_gen_binary (MULT, mode, op0, tem);
2044 else
2046 /* 0/x is 0 (or x&0 if x has side-effects). */
2047 if (trueop0 == const0_rtx)
2048 return side_effects_p (op1)
2049 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2050 : const0_rtx;
2051 /* x/1 is x. */
2052 if (trueop1 == const1_rtx)
2054 /* Handle narrowing DIV. */
2055 rtx x = gen_lowpart_common (mode, op0);
2056 if (x)
2057 return x;
2058 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2059 return gen_lowpart_SUBREG (mode, op0);
2060 return op0;
2062 /* x/-1 is -x. */
2063 if (trueop1 == constm1_rtx)
2065 rtx x = gen_lowpart_common (mode, op0);
2066 if (!x)
2067 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2068 ? gen_lowpart_SUBREG (mode, op0) : op0;
2069 return simplify_gen_unary (NEG, mode, x, mode);
2072 break;
2074 case UMOD:
2075 /* 0%x is 0 (or x&0 if x has side-effects). */
2076 if (trueop0 == const0_rtx)
2077 return side_effects_p (op1)
2078 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2079 : const0_rtx;
2080 /* x%1 is 0 (or x&0 if x has side-effects). */
2081 if (trueop1 == const1_rtx)
2082 return side_effects_p (op0)
2083 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2084 : const0_rtx;
2085 /* Implement modulus by power of two as AND. */
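/* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)).  */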
2086 if (GET_CODE (trueop1) == CONST_INT
2087 && exact_log2 (INTVAL (trueop1)) > 0)
2088 return simplify_gen_binary (AND, mode, op0,
2089 GEN_INT (INTVAL (op1) - 1));
2090 break;
2092 case MOD:
2093 /* 0%x is 0 (or x&0 if x has side-effects). */
2094 if (trueop0 == const0_rtx)
2095 return side_effects_p (op1)
2096 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2097 : const0_rtx;
2098 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2099 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2100 return side_effects_p (op0)
2101 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2102 : const0_rtx;
2103 break;
2105 case ROTATERT:
2106 case ROTATE:
2107 case ASHIFTRT:
2108 /* Rotating ~0 always results in ~0. */
2109 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2110 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2111 && ! side_effects_p (op1))
2112 return op0;
2114 /* Fall through.... */
2116 case ASHIFT:
2117 case LSHIFTRT:
2118 if (trueop1 == const0_rtx)
2119 return op0;
2120 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2121 return op0;
2122 break;
2124 case SMIN:
2125 if (width <= HOST_BITS_PER_WIDE_INT
2126 && GET_CODE (trueop1) == CONST_INT
2127 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2128 && ! side_effects_p (op0))
2129 return op1;
2130 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2131 return op0;
2132 tem = simplify_associative_operation (code, mode, op0, op1);
2133 if (tem)
2134 return tem;
2135 break;
2137 case SMAX:
2138 if (width <= HOST_BITS_PER_WIDE_INT
2139 && GET_CODE (trueop1) == CONST_INT
2140 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2141 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2142 && ! side_effects_p (op0))
2143 return op1;
2144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2145 return op0;
2146 tem = simplify_associative_operation (code, mode, op0, op1);
2147 if (tem)
2148 return tem;
2149 break;
2151 case UMIN:
2152 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2153 return op1;
2154 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2155 return op0;
2156 tem = simplify_associative_operation (code, mode, op0, op1);
2157 if (tem)
2158 return tem;
2159 break;
2161 case UMAX:
2162 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2163 return op1;
2164 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2165 return op0;
2166 tem = simplify_associative_operation (code, mode, op0, op1);
2167 if (tem)
2168 return tem;
2169 break;
2171 case SS_PLUS:
2172 case US_PLUS:
2173 case SS_MINUS:
2174 case US_MINUS:
2175 /* ??? There are simplifications that can be done. */
2176 return 0;
2178 case VEC_SELECT:
2179 if (!VECTOR_MODE_P (mode))
2181 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2182 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2183 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2184 gcc_assert (XVECLEN (trueop1, 0) == 1);
2185 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2187 if (GET_CODE (trueop0) == CONST_VECTOR)
2188 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2189 (trueop1, 0, 0)));
2191 else
2193 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2194 gcc_assert (GET_MODE_INNER (mode)
2195 == GET_MODE_INNER (GET_MODE (trueop0)));
2196 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2198 if (GET_CODE (trueop0) == CONST_VECTOR)
2200 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2201 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2202 rtvec v = rtvec_alloc (n_elts);
2203 unsigned int i;
2205 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2206 for (i = 0; i < n_elts; i++)
2208 rtx x = XVECEXP (trueop1, 0, i);
2210 gcc_assert (GET_CODE (x) == CONST_INT);
2211 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2212 INTVAL (x));
2215 return gen_rtx_CONST_VECTOR (mode, v);
2218 return 0;
2219 case VEC_CONCAT:
2221 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2222 ? GET_MODE (trueop0)
2223 : GET_MODE_INNER (mode));
2224 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2225 ? GET_MODE (trueop1)
2226 : GET_MODE_INNER (mode));
2228 gcc_assert (VECTOR_MODE_P (mode));
2229 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2230 == GET_MODE_SIZE (mode));
2232 if (VECTOR_MODE_P (op0_mode))
2233 gcc_assert (GET_MODE_INNER (mode)
2234 == GET_MODE_INNER (op0_mode));
2235 else
2236 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2238 if (VECTOR_MODE_P (op1_mode))
2239 gcc_assert (GET_MODE_INNER (mode)
2240 == GET_MODE_INNER (op1_mode));
2241 else
2242 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2244 if ((GET_CODE (trueop0) == CONST_VECTOR
2245 || GET_CODE (trueop0) == CONST_INT
2246 || GET_CODE (trueop0) == CONST_DOUBLE)
2247 && (GET_CODE (trueop1) == CONST_VECTOR
2248 || GET_CODE (trueop1) == CONST_INT
2249 || GET_CODE (trueop1) == CONST_DOUBLE))
2251 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2252 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2253 rtvec v = rtvec_alloc (n_elts);
2254 unsigned int i;
2255 unsigned in_n_elts = 1;
2257 if (VECTOR_MODE_P (op0_mode))
2258 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2259 for (i = 0; i < n_elts; i++)
2261 if (i < in_n_elts)
2263 if (!VECTOR_MODE_P (op0_mode))
2264 RTVEC_ELT (v, i) = trueop0;
2265 else
2266 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2268 else
2270 if (!VECTOR_MODE_P (op1_mode))
2271 RTVEC_ELT (v, i) = trueop1;
2272 else
2273 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2274 i - in_n_elts);
2278 return gen_rtx_CONST_VECTOR (mode, v);
2281 return 0;
2283 default:
2284 gcc_unreachable ();
2287 return 0;
2290 /* Get the integer argument values in two forms:
2291 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
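/* E.g. for width 8 and trueop0 == (const_int 0xff), ARG0 is 255
   while ARG0S is -1.  */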
2293 arg0 = INTVAL (trueop0);
2294 arg1 = INTVAL (trueop1);
2296 if (width < HOST_BITS_PER_WIDE_INT)
2298 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2299 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2301 arg0s = arg0;
2302 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2303 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2305 arg1s = arg1;
2306 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2307 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2309 else
2311 arg0s = arg0;
2312 arg1s = arg1;
2315 /* Compute the value of the arithmetic. */
2317 switch (code)
2319 case PLUS:
2320 val = arg0s + arg1s;
2321 break;
2323 case MINUS:
2324 val = arg0s - arg1s;
2325 break;
2327 case MULT:
2328 val = arg0s * arg1s;
2329 break;
2331 case DIV:
2332 if (arg1s == 0
2333 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2334 && arg1s == -1))
2335 return 0;
2336 val = arg0s / arg1s;
2337 break;
2339 case MOD:
2340 if (arg1s == 0
2341 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2342 && arg1s == -1))
2343 return 0;
2344 val = arg0s % arg1s;
2345 break;
2347 case UDIV:
2348 if (arg1 == 0
2349 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2350 && arg1s == -1))
2351 return 0;
2352 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2353 break;
2355 case UMOD:
2356 if (arg1 == 0
2357 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2358 && arg1s == -1))
2359 return 0;
2360 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2361 break;
2363 case AND:
2364 val = arg0 & arg1;
2365 break;
2367 case IOR:
2368 val = arg0 | arg1;
2369 break;
2371 case XOR:
2372 val = arg0 ^ arg1;
2373 break;
2375 case LSHIFTRT:
2376 case ASHIFT:
2377 case ASHIFTRT:
2378 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2379 value is in range. We can't return any old value for out-of-range
2380 arguments because either the middle-end (via shift_truncation_mask)
2381 or the back-end might be relying on target-specific knowledge.
2382 Nor can we rely on shift_truncation_mask, since the shift might
2383 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2384 if (SHIFT_COUNT_TRUNCATED)
2385 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2386 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2387 return 0;
2389 val = (code == ASHIFT
2390 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2391 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2393 /* Sign-extend the result for arithmetic right shifts. */
2394 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2395 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2396 break;
2398 case ROTATERT:
2399 if (arg1 < 0)
2400 return 0;
2402 arg1 %= width;
2403 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2404 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2405 break;
2407 case ROTATE:
2408 if (arg1 < 0)
2409 return 0;
2411 arg1 %= width;
2412 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2413 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
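/* E.g. for width 8, rotating arg0 == 0xb1 left by 4 gives 0xb1b here;
   the truncation at the end of this function reduces it to 0x1b.  */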
2414 break;
2416 case COMPARE:
2417 /* Do nothing here. */
2418 return 0;
2420 case SMIN:
2421 val = arg0s <= arg1s ? arg0s : arg1s;
2422 break;
2424 case UMIN:
2425 val = ((unsigned HOST_WIDE_INT) arg0
2426 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2427 break;
2429 case SMAX:
2430 val = arg0s > arg1s ? arg0s : arg1s;
2431 break;
2433 case UMAX:
2434 val = ((unsigned HOST_WIDE_INT) arg0
2435 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2436 break;
2438 case SS_PLUS:
2439 case US_PLUS:
2440 case SS_MINUS:
2441 case US_MINUS:
2442 /* ??? There are simplifications that can be done. */
2443 return 0;
2445 default:
2446 gcc_unreachable ();
2449 val = trunc_int_for_mode (val, mode);
2451 return GEN_INT (val);
2454 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2455 PLUS or MINUS.
2457 Rather than test for specific cases, we do this by a brute-force method
2458 and do all possible simplifications until no more changes occur. Then
2459 we rebuild the operation.
2461 If FORCE is true, then always generate the rtx. This is used to
2462 canonicalize stuff emitted from simplify_gen_binary. Note that this
2463 can still fail if the rtx is too complex. It won't fail just because
2464 the result is not 'simpler' than the input, however. */
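/* For example, (plus (minus A B) C) expands to the operand list
   A (positive), B (negated), C (positive); pairs of operands are then
   simplified against each other and the survivors are reassembled
   into a chain of PLUS and MINUS expressions.  */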
2466 struct simplify_plus_minus_op_data
2468 rtx op;
2469 int neg;
2472 static int
2473 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2475 const struct simplify_plus_minus_op_data *d1 = p1;
2476 const struct simplify_plus_minus_op_data *d2 = p2;
2478 return (commutative_operand_precedence (d2->op)
2479 - commutative_operand_precedence (d1->op));
2482 static rtx
2483 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2484 rtx op1, int force)
2486 struct simplify_plus_minus_op_data ops[8];
2487 rtx result, tem;
2488 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2489 int first, changed;
2490 int i, j;
2492 memset (ops, 0, sizeof ops);
2494 /* Set up the two operands and then expand them until nothing has been
2495 changed. If we run out of room in our array, give up; this should
2496 almost never happen. */
2498 ops[0].op = op0;
2499 ops[0].neg = 0;
2500 ops[1].op = op1;
2501 ops[1].neg = (code == MINUS);
2505 changed = 0;
2507 for (i = 0; i < n_ops; i++)
2509 rtx this_op = ops[i].op;
2510 int this_neg = ops[i].neg;
2511 enum rtx_code this_code = GET_CODE (this_op);
2513 switch (this_code)
2515 case PLUS:
2516 case MINUS:
2517 if (n_ops == 7)
2518 return NULL_RTX;
2520 ops[n_ops].op = XEXP (this_op, 1);
2521 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2522 n_ops++;
2524 ops[i].op = XEXP (this_op, 0);
2525 input_ops++;
2526 changed = 1;
2527 break;
2529 case NEG:
2530 ops[i].op = XEXP (this_op, 0);
2531 ops[i].neg = ! this_neg;
2532 changed = 1;
2533 break;
2535 case CONST:
2536 if (n_ops < 7
2537 && GET_CODE (XEXP (this_op, 0)) == PLUS
2538 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2539 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2541 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2542 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2543 ops[n_ops].neg = this_neg;
2544 n_ops++;
2545 input_consts++;
2546 changed = 1;
2548 break;
2550 case NOT:
2551 /* ~a -> (-a - 1) */
2552 if (n_ops != 7)
2554 ops[n_ops].op = constm1_rtx;
2555 ops[n_ops++].neg = this_neg;
2556 ops[i].op = XEXP (this_op, 0);
2557 ops[i].neg = !this_neg;
2558 changed = 1;
2560 break;
2562 case CONST_INT:
2563 if (this_neg)
2565 ops[i].op = neg_const_int (mode, this_op);
2566 ops[i].neg = 0;
2567 changed = 1;
2569 break;
2571 default:
2572 break;
2576 while (changed);
2578 /* If we only have two operands, we can't do anything. */
2579 if (n_ops <= 2 && !force)
2580 return NULL_RTX;
2582 /* Count the number of CONSTs we didn't split above. */
2583 for (i = 0; i < n_ops; i++)
2584 if (GET_CODE (ops[i].op) == CONST)
2585 input_consts++;
2587 /* Now simplify each pair of operands until nothing changes. The first
2588 time through just simplify constants against each other. */
2590 first = 1;
2593 changed = first;
2595 for (i = 0; i < n_ops - 1; i++)
2596 for (j = i + 1; j < n_ops; j++)
2598 rtx lhs = ops[i].op, rhs = ops[j].op;
2599 int lneg = ops[i].neg, rneg = ops[j].neg;
2601 if (lhs != 0 && rhs != 0
2602 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2604 enum rtx_code ncode = PLUS;
2606 if (lneg != rneg)
2608 ncode = MINUS;
2609 if (lneg)
2610 tem = lhs, lhs = rhs, rhs = tem;
2612 else if (swap_commutative_operands_p (lhs, rhs))
2613 tem = lhs, lhs = rhs, rhs = tem;
2615 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2617 /* Reject "simplifications" that just wrap the two
2618 arguments in a CONST. Failure to do so can result
2619 in infinite recursion with simplify_binary_operation
2620 when it calls us to simplify CONST operations. */
2621 if (tem
2622 && ! (GET_CODE (tem) == CONST
2623 && GET_CODE (XEXP (tem, 0)) == ncode
2624 && XEXP (XEXP (tem, 0), 0) == lhs
2625 && XEXP (XEXP (tem, 0), 1) == rhs)
2626 /* Don't allow -x + -1 -> ~x simplifications in the
2627 first pass. This allows us the chance to combine
2628 the -1 with other constants. */
2629 && ! (first
2630 && GET_CODE (tem) == NOT
2631 && XEXP (tem, 0) == rhs))
2633 lneg &= rneg;
2634 if (GET_CODE (tem) == NEG)
2635 tem = XEXP (tem, 0), lneg = !lneg;
2636 if (GET_CODE (tem) == CONST_INT && lneg)
2637 tem = neg_const_int (mode, tem), lneg = 0;
2639 ops[i].op = tem;
2640 ops[i].neg = lneg;
2641 ops[j].op = NULL_RTX;
2642 changed = 1;
2647 first = 0;
2649 while (changed);
2651 /* Pack all the operands to the lower-numbered entries. */
2652 for (i = 0, j = 0; j < n_ops; j++)
2653 if (ops[j].op)
2654 ops[i++] = ops[j];
2655 n_ops = i;
2657 /* Sort the operations based on swap_commutative_operands_p. */
2658 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2660 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2661 if (n_ops == 2
2662 && GET_CODE (ops[1].op) == CONST_INT
2663 && CONSTANT_P (ops[0].op)
2664 && ops[0].neg)
2665 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2667 /* We suppressed creation of trivial CONST expressions in the
2668 combination loop to avoid recursion. Create one manually now.
2669 The combination loop should have ensured that there is exactly
2670 one CONST_INT, and the sort will have ensured that it is last
2671 in the array and that any other constant will be next-to-last. */
2673 if (n_ops > 1
2674 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2675 && CONSTANT_P (ops[n_ops - 2].op))
2677 rtx value = ops[n_ops - 1].op;
2678 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2679 value = neg_const_int (mode, value);
2680 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2681 n_ops--;
2684 /* Count the number of CONSTs that we generated. */
2685 n_consts = 0;
2686 for (i = 0; i < n_ops; i++)
2687 if (GET_CODE (ops[i].op) == CONST)
2688 n_consts++;
2690 /* Give up if we didn't reduce the number of operands we had. Make
2691 sure we count a CONST as two operands. If we have the same
2692 number of operands, but have made more CONSTs than before, this
2693 is also an improvement, so accept it. */
2694 if (!force
2695 && (n_ops + n_consts > input_ops
2696 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2697 return NULL_RTX;
2699 /* Put a non-negated operand first, if possible. */
2701 for (i = 0; i < n_ops && ops[i].neg; i++)
2702 continue;
2703 if (i == n_ops)
2704 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2705 else if (i != 0)
2707 tem = ops[0].op;
2708 ops[0] = ops[i];
2709 ops[i].op = tem;
2710 ops[i].neg = 1;
2713 /* Now make the result by performing the requested operations. */
2714 result = ops[0].op;
2715 for (i = 1; i < n_ops; i++)
2716 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2717 mode, result, ops[i].op);
2719 return result;
2722 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2723 static bool
2724 plus_minus_operand_p (rtx x)
2726 return GET_CODE (x) == PLUS
2727 || GET_CODE (x) == MINUS
2728 || (GET_CODE (x) == CONST
2729 && GET_CODE (XEXP (x, 0)) == PLUS
2730 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2731 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2734 /* Like simplify_binary_operation except used for relational operators.
2735 MODE is the mode of the result. If MODE is VOIDmode, the operands
2736 must not both be VOIDmode.
2738 CMP_MODE specifies the mode in which the comparison is done, so it is
2739 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2740 the operands or, if both are VOIDmode, the operands are compared in
2741 "infinite precision". */
2743 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2744 enum machine_mode cmp_mode, rtx op0, rtx op1)
2746 rtx tem, trueop0, trueop1;
2748 if (cmp_mode == VOIDmode)
2749 cmp_mode = GET_MODE (op0);
2750 if (cmp_mode == VOIDmode)
2751 cmp_mode = GET_MODE (op1);
2753 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2754 if (tem)
2756 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2758 if (tem == const0_rtx)
2759 return CONST0_RTX (mode);
2760 #ifdef FLOAT_STORE_FLAG_VALUE
2762 REAL_VALUE_TYPE val;
2763 val = FLOAT_STORE_FLAG_VALUE (mode);
2764 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2766 #else
2767 return NULL_RTX;
2768 #endif
2770 if (VECTOR_MODE_P (mode))
2772 if (tem == const0_rtx)
2773 return CONST0_RTX (mode);
2774 #ifdef VECTOR_STORE_FLAG_VALUE
2776 int i, units;
2777 rtvec v;
2779 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2780 if (val == NULL_RTX)
2781 return NULL_RTX;
2782 if (val == const1_rtx)
2783 return CONST1_RTX (mode);
2785 units = GET_MODE_NUNITS (mode);
2786 v = rtvec_alloc (units);
2787 for (i = 0; i < units; i++)
2788 RTVEC_ELT (v, i) = val;
2789 return gen_rtx_raw_CONST_VECTOR (mode, v);
2791 #else
2792 return NULL_RTX;
2793 #endif
2796 return tem;
2799 /* For the following tests, ensure const0_rtx is op1. */
2800 if (swap_commutative_operands_p (op0, op1)
2801 || (op0 == const0_rtx && op1 != const0_rtx))
2802 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2804 /* If op0 is a compare, extract the comparison arguments from it. */
2805 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2806 return simplify_relational_operation (code, mode, VOIDmode,
2807 XEXP (op0, 0), XEXP (op0, 1));
2809 if (mode == VOIDmode
2810 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2811 || CC0_P (op0))
2812 return NULL_RTX;
2814 trueop0 = avoid_constant_pool_reference (op0);
2815 trueop1 = avoid_constant_pool_reference (op1);
2816 return simplify_relational_operation_1 (code, mode, cmp_mode,
2817 trueop0, trueop1);
2820 /* This part of simplify_relational_operation is only used when CMP_MODE
2821 is not in class MODE_CC (i.e. it is a real comparison).
2823 MODE is the mode of the result, while CMP_MODE specifies the mode
2824 in which the comparison is done, so it is the mode of the operands. */
2826 static rtx
2827 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2828 enum machine_mode cmp_mode, rtx op0, rtx op1)
2830 enum rtx_code op0code = GET_CODE (op0);
2832 if (GET_CODE (op1) == CONST_INT)
2834 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2836 /* If op0 is a comparison, extract the comparison arguments from it. */
2837 if (code == NE)
2839 if (GET_MODE (op0) == cmp_mode)
2840 return simplify_rtx (op0);
2841 else
2842 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2843 XEXP (op0, 0), XEXP (op0, 1));
2845 else if (code == EQ)
2847 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2848 if (new_code != UNKNOWN)
2849 return simplify_gen_relational (new_code, mode, VOIDmode,
2850 XEXP (op0, 0), XEXP (op0, 1));
2855 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
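/* E.g. (eq (plus x (const_int 3)) (const_int 5)) becomes
   (eq x (const_int 2)).  */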
2856 if ((code == EQ || code == NE)
2857 && (op0code == PLUS || op0code == MINUS)
2858 && CONSTANT_P (op1)
2859 && CONSTANT_P (XEXP (op0, 1))
2860 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2862 rtx x = XEXP (op0, 0);
2863 rtx c = XEXP (op0, 1);
2865 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2866 cmp_mode, op1, c);
2867 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2870 return NULL_RTX;
2873 /* Check if the given comparison (done in the given MODE) is actually a
2874 tautology or a contradiction.
2875 If no simplification is possible, this function returns zero.
2876 Otherwise, it returns either const_true_rtx or const0_rtx. */
2879 simplify_const_relational_operation (enum rtx_code code,
2880 enum machine_mode mode,
2881 rtx op0, rtx op1)
2883 int equal, op0lt, op0ltu, op1lt, op1ltu;
2884 rtx tem;
2885 rtx trueop0;
2886 rtx trueop1;
2888 gcc_assert (mode != VOIDmode
2889 || (GET_MODE (op0) == VOIDmode
2890 && GET_MODE (op1) == VOIDmode));
2892 /* If op0 is a compare, extract the comparison arguments from it. */
2893 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2894 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2896 /* We can't simplify MODE_CC values since we don't know what the
2897 actual comparison is. */
2898 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2899 return 0;
2901 /* Make sure the constant is second. */
2902 if (swap_commutative_operands_p (op0, op1))
2904 tem = op0, op0 = op1, op1 = tem;
2905 code = swap_condition (code);
2908 trueop0 = avoid_constant_pool_reference (op0);
2909 trueop1 = avoid_constant_pool_reference (op1);
2911 /* For integer comparisons of A and B maybe we can simplify A - B and can
2912 then simplify a comparison of that with zero. If A and B are both either
2913 a register or a CONST_INT, this can't help; testing for these cases will
2914 prevent infinite recursion here and speed things up.
2916 If CODE is an unsigned comparison, then we can never do this optimization,
2917 because it gives an incorrect result if the subtraction wraps around zero.
2918 ANSI C defines unsigned operations such that they never overflow, and
2919 thus such cases cannot be ignored; but we cannot do it even for
2920 signed comparisons for languages such as Java, so test flag_wrapv. */
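/* E.g. (gt (plus X (const_int 4)) X) folds the subtraction to
   (const_int 4), and (gt (const_int 4) (const_int 0)) is a tautology;
   this is only valid because signed overflow is undefined behavior
   when -fwrapv is not in effect.  */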
2922 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2923 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2924 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2925 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2926 /* We cannot do this for == or != if tem is a nonzero address. */
2927 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2928 && code != GTU && code != GEU && code != LTU && code != LEU)
2929 return simplify_const_relational_operation (signed_condition (code),
2930 mode, tem, const0_rtx);
2932 if (flag_unsafe_math_optimizations && code == ORDERED)
2933 return const_true_rtx;
2935 if (flag_unsafe_math_optimizations && code == UNORDERED)
2936 return const0_rtx;
2938 /* For modes without NaNs, if the two operands are equal, we know the
2939 result except if they have side-effects. */
2940 if (! HONOR_NANS (GET_MODE (trueop0))
2941 && rtx_equal_p (trueop0, trueop1)
2942 && ! side_effects_p (trueop0))
2943 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2945 /* If the operands are floating-point constants, see if we can fold
2946 the result. */
2947 else if (GET_CODE (trueop0) == CONST_DOUBLE
2948 && GET_CODE (trueop1) == CONST_DOUBLE
2949 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2951 REAL_VALUE_TYPE d0, d1;
2953 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2954 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2956 /* Comparisons are unordered iff at least one of the values is NaN. */
2957 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2958 switch (code)
2960 case UNEQ:
2961 case UNLT:
2962 case UNGT:
2963 case UNLE:
2964 case UNGE:
2965 case NE:
2966 case UNORDERED:
2967 return const_true_rtx;
2968 case EQ:
2969 case LT:
2970 case GT:
2971 case LE:
2972 case GE:
2973 case LTGT:
2974 case ORDERED:
2975 return const0_rtx;
2976 default:
2977 return 0;
2980 equal = REAL_VALUES_EQUAL (d0, d1);
2981 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2982 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2985 /* Otherwise, see if the operands are both integers. */
2986 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2987 && (GET_CODE (trueop0) == CONST_DOUBLE
2988 || GET_CODE (trueop0) == CONST_INT)
2989 && (GET_CODE (trueop1) == CONST_DOUBLE
2990 || GET_CODE (trueop1) == CONST_INT))
2992 int width = GET_MODE_BITSIZE (mode);
2993 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2994 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2996 /* Get the two words comprising each integer constant. */
2997 if (GET_CODE (trueop0) == CONST_DOUBLE)
2999 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3000 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3002 else
3004 l0u = l0s = INTVAL (trueop0);
3005 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3008 if (GET_CODE (trueop1) == CONST_DOUBLE)
3010 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3011 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3013 else
3015 l1u = l1s = INTVAL (trueop1);
3016 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3019 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3020 we have to sign or zero-extend the values. */
3021 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3023 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3024 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3026 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3027 l0s |= ((HOST_WIDE_INT) (-1) << width);
3029 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3030 l1s |= ((HOST_WIDE_INT) (-1) << width);
3032 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3033 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3035 equal = (h0u == h1u && l0u == l1u);
3036 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3037 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3038 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3039 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3042 /* Otherwise, there are some code-specific tests we can make. */
3043 else
3045 /* Optimize comparisons with upper and lower bounds. */
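/* E.g. (geu X (const_int 0)) is always true and (ltu X (const_int 0))
   is always false, since 0 is the unsigned minimum of every mode.  */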
3046 if (SCALAR_INT_MODE_P (mode)
3047 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3049 rtx mmin, mmax;
3050 int sign;
3052 if (code == GEU
3053 || code == LEU
3054 || code == GTU
3055 || code == LTU)
3056 sign = 0;
3057 else
3058 sign = 1;
3060 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3062 tem = NULL_RTX;
3063 switch (code)
3065 case GEU:
3066 case GE:
3067 /* x >= min is always true. */
3068 if (rtx_equal_p (trueop1, mmin))
3069 tem = const_true_rtx;
3070 else
3071 break;
3073 case LEU:
3074 case LE:
3075 /* x <= max is always true. */
3076 if (rtx_equal_p (trueop1, mmax))
3077 tem = const_true_rtx;
3078 break;
3080 case GTU:
3081 case GT:
3082 /* x > max is always false. */
3083 if (rtx_equal_p (trueop1, mmax))
3084 tem = const0_rtx;
3085 break;
3087 case LTU:
3088 case LT:
3089 /* x < min is always false. */
3090 if (rtx_equal_p (trueop1, mmin))
3091 tem = const0_rtx;
3092 break;
3094 default:
3095 break;
3097 if (tem == const0_rtx
3098 || tem == const_true_rtx)
3099 return tem;
3102 switch (code)
3104 case EQ:
3105 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3106 return const0_rtx;
3107 break;
3109 case NE:
3110 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3111 return const_true_rtx;
3112 break;
3114 case LT:
3115 /* Optimize abs(x) < 0.0. */
3116 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3118 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3119 : trueop0;
3120 if (GET_CODE (tem) == ABS)
3121 return const0_rtx;
3123 break;
3125 case GE:
3126 /* Optimize abs(x) >= 0.0. */
3127 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3129 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3130 : trueop0;
3131 if (GET_CODE (tem) == ABS)
3132 return const_true_rtx;
3134 break;
3136 case UNGE:
3137 /* Optimize ! (abs(x) < 0.0). */
3138 if (trueop1 == CONST0_RTX (mode))
3140 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3141 : trueop0;
3142 if (GET_CODE (tem) == ABS)
3143 return const_true_rtx;
3145 break;
3147 default:
3148 break;
3151 return 0;
3154 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3155 as appropriate. */
3156 switch (code)
3158 case EQ:
3159 case UNEQ:
3160 return equal ? const_true_rtx : const0_rtx;
3161 case NE:
3162 case LTGT:
3163 return ! equal ? const_true_rtx : const0_rtx;
3164 case LT:
3165 case UNLT:
3166 return op0lt ? const_true_rtx : const0_rtx;
3167 case GT:
3168 case UNGT:
3169 return op1lt ? const_true_rtx : const0_rtx;
3170 case LTU:
3171 return op0ltu ? const_true_rtx : const0_rtx;
3172 case GTU:
3173 return op1ltu ? const_true_rtx : const0_rtx;
3174 case LE:
3175 case UNLE:
3176 return equal || op0lt ? const_true_rtx : const0_rtx;
3177 case GE:
3178 case UNGE:
3179 return equal || op1lt ? const_true_rtx : const0_rtx;
3180 case LEU:
3181 return equal || op0ltu ? const_true_rtx : const0_rtx;
3182 case GEU:
3183 return equal || op1ltu ? const_true_rtx : const0_rtx;
3184 case ORDERED:
3185 return const_true_rtx;
3186 case UNORDERED:
3187 return const0_rtx;
3188 default:
3189 gcc_unreachable ();
3193 /* Simplify CODE, an operation with result mode MODE and three operands,
3194 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3195 a constant. Return 0 if no simplification is possible. */
3198 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3199 enum machine_mode op0_mode, rtx op0, rtx op1,
3200 rtx op2)
3202 unsigned int width = GET_MODE_BITSIZE (mode);
3204 /* VOIDmode means "infinite" precision. */
3205 if (width == 0)
3206 width = HOST_BITS_PER_WIDE_INT;
3208 switch (code)
3210 case SIGN_EXTRACT:
3211 case ZERO_EXTRACT:
3212 if (GET_CODE (op0) == CONST_INT
3213 && GET_CODE (op1) == CONST_INT
3214 && GET_CODE (op2) == CONST_INT
3215 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3216 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3218 /* Extracting a bit-field from a constant. */
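/* E.g. a 4-bit field at position 0 of (const_int 0x9a) (with
   !BITS_BIG_ENDIAN) yields 10 for ZERO_EXTRACT but -6 for
   SIGN_EXTRACT, since bit 3 of the field is set.  */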
3219 HOST_WIDE_INT val = INTVAL (op0);
3221 if (BITS_BIG_ENDIAN)
3222 val >>= (GET_MODE_BITSIZE (op0_mode)
3223 - INTVAL (op2) - INTVAL (op1));
3224 else
3225 val >>= INTVAL (op2);
3227 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3229 /* First zero-extend. */
3230 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3231 /* If desired, propagate sign bit. */
3232 if (code == SIGN_EXTRACT
3233 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3234 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3237 /* Clear the bits that don't belong in our mode,
3238 unless they and our sign bit are all one.
3239 So we get either a reasonable negative value or a reasonable
3240 unsigned value for this mode. */
3241 if (width < HOST_BITS_PER_WIDE_INT
3242 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3243 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3244 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3246 return gen_int_mode (val, mode);
3248 break;
3250 case IF_THEN_ELSE:
3251 if (GET_CODE (op0) == CONST_INT)
3252 return op0 != const0_rtx ? op1 : op2;
3254 /* Convert c ? a : a into "a". */
3255 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3256 return op1;
3258 /* Convert a != b ? a : b into "a". */
3259 if (GET_CODE (op0) == NE
3260 && ! side_effects_p (op0)
3261 && ! HONOR_NANS (mode)
3262 && ! HONOR_SIGNED_ZEROS (mode)
3263 && ((rtx_equal_p (XEXP (op0, 0), op1)
3264 && rtx_equal_p (XEXP (op0, 1), op2))
3265 || (rtx_equal_p (XEXP (op0, 0), op2)
3266 && rtx_equal_p (XEXP (op0, 1), op1))))
3267 return op1;
3269 /* Convert a == b ? a : b into "b". */
3270 if (GET_CODE (op0) == EQ
3271 && ! side_effects_p (op0)
3272 && ! HONOR_NANS (mode)
3273 && ! HONOR_SIGNED_ZEROS (mode)
3274 && ((rtx_equal_p (XEXP (op0, 0), op1)
3275 && rtx_equal_p (XEXP (op0, 1), op2))
3276 || (rtx_equal_p (XEXP (op0, 0), op2)
3277 && rtx_equal_p (XEXP (op0, 1), op1))))
3278 return op2;
3280 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3282 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3283 ? GET_MODE (XEXP (op0, 1))
3284 : GET_MODE (XEXP (op0, 0)));
3285 rtx temp;
3287 /* Look for happy constants in op1 and op2. */
3288 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3290 HOST_WIDE_INT t = INTVAL (op1);
3291 HOST_WIDE_INT f = INTVAL (op2);
3293 if (t == STORE_FLAG_VALUE && f == 0)
3294 code = GET_CODE (op0);
3295 else if (t == 0 && f == STORE_FLAG_VALUE)
3297 enum rtx_code tmp;
3298 tmp = reversed_comparison_code (op0, NULL_RTX);
3299 if (tmp == UNKNOWN)
3300 break;
3301 code = tmp;
3303 else
3304 break;
3306 return simplify_gen_relational (code, mode, cmp_mode,
3307 XEXP (op0, 0), XEXP (op0, 1));
3310 if (cmp_mode == VOIDmode)
3311 cmp_mode = op0_mode;
3312 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3313 cmp_mode, XEXP (op0, 0),
3314 XEXP (op0, 1));
3316 /* See if any simplifications were possible. */
3317 if (temp)
3319 if (GET_CODE (temp) == CONST_INT)
3320 return temp == const0_rtx ? op2 : op1;
3321 else if (temp)
3322 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3325 break;
3327 case VEC_MERGE:
3328 gcc_assert (GET_MODE (op0) == mode);
3329 gcc_assert (GET_MODE (op1) == mode);
3330 gcc_assert (VECTOR_MODE_P (mode));
3331 op2 = avoid_constant_pool_reference (op2);
3332 if (GET_CODE (op2) == CONST_INT)
3334 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3335 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3336 int mask = (1 << n_elts) - 1;
3338 if (!(INTVAL (op2) & mask))
3339 return op1;
3340 if ((INTVAL (op2) & mask) == mask)
3341 return op0;
3343 op0 = avoid_constant_pool_reference (op0);
3344 op1 = avoid_constant_pool_reference (op1);
3345 if (GET_CODE (op0) == CONST_VECTOR
3346 && GET_CODE (op1) == CONST_VECTOR)
3348 rtvec v = rtvec_alloc (n_elts);
3349 unsigned int i;
3351 for (i = 0; i < n_elts; i++)
3352 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3353 ? CONST_VECTOR_ELT (op0, i)
3354 : CONST_VECTOR_ELT (op1, i));
3355 return gen_rtx_CONST_VECTOR (mode, v);
3358 break;
3360 default:
3361 gcc_unreachable ();
3364 return 0;
3367 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3368 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3370 Works by unpacking OP into a collection of 8-bit values
3371 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3372 and then repacking them again for OUTERMODE. */
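/* E.g. on a little-endian target, (subreg:HI (const_int 0x12345678) 0)
   with SImode inner unpacks to the bytes {0x78, 0x56, 0x34, 0x12};
   repacking the first two yields (const_int 0x5678).  */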
3374 static rtx
3375 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3376 enum machine_mode innermode, unsigned int byte)
3378 /* We support up to 512-bit values (for V8DFmode). */
3379 enum {
3380 max_bitsize = 512,
3381 value_bit = 8,
3382 value_mask = (1 << value_bit) - 1
3384 unsigned char value[max_bitsize / value_bit];
3385 int value_start;
3386 int i;
3387 int elem;
3389 int num_elem;
3390 rtx * elems;
3391 int elem_bitsize;
3392 rtx result_s;
3393 rtvec result_v = NULL;
3394 enum mode_class outer_class;
3395 enum machine_mode outer_submode;
3397 /* Some ports misuse CCmode. */
3398 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3399 return op;
3401 /* We have no way to represent a complex constant at the rtl level. */
3402 if (COMPLEX_MODE_P (outermode))
3403 return NULL_RTX;
3405 /* Unpack the value. */
3407 if (GET_CODE (op) == CONST_VECTOR)
3409 num_elem = CONST_VECTOR_NUNITS (op);
3410 elems = &CONST_VECTOR_ELT (op, 0);
3411 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3413 else
3415 num_elem = 1;
3416 elems = &op;
3417 elem_bitsize = max_bitsize;
3419 /* If this asserts, it is too complicated; reducing value_bit may help. */
3420 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3421 /* I don't know how to handle endianness of sub-units. */
3422 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3424 for (elem = 0; elem < num_elem; elem++)
3426 unsigned char * vp;
3427 rtx el = elems[elem];
3429 /* Vectors are kept in target memory order. (This is probably
3430 a mistake.) */
3432 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3433 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3434 / BITS_PER_UNIT);
3435 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3436 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3437 unsigned bytele = (subword_byte % UNITS_PER_WORD
3438 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3439 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3442 switch (GET_CODE (el))
3444 case CONST_INT:
3445 for (i = 0;
3446 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3447 i += value_bit)
3448 *vp++ = INTVAL (el) >> i;
3449 /* CONST_INTs are always logically sign-extended. */
3450 for (; i < elem_bitsize; i += value_bit)
3451 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3452 break;
3454 case CONST_DOUBLE:
3455 if (GET_MODE (el) == VOIDmode)
3457 /* If this triggers, someone should have generated a
3458 CONST_INT instead. */
3459 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3461 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3462 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3463 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3465 *vp++
3466 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3467 i += value_bit;
3469 /* It shouldn't matter what's done here, so fill it with
3470 zero. */
3471 for (; i < max_bitsize; i += value_bit)
3472 *vp++ = 0;
3474 else
3476 long tmp[max_bitsize / 32];
3477 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3479 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3480 gcc_assert (bitsize <= elem_bitsize);
3481 gcc_assert (bitsize % value_bit == 0);
3483 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3484 GET_MODE (el));
3486 /* real_to_target produces its result in words affected by
3487 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3488 and use WORDS_BIG_ENDIAN instead; see the documentation
3489 of SUBREG in rtl.texi. */
3490 for (i = 0; i < bitsize; i += value_bit)
3492 int ibase;
3493 if (WORDS_BIG_ENDIAN)
3494 ibase = bitsize - 1 - i;
3495 else
3496 ibase = i;
3497 *vp++ = tmp[ibase / 32] >> i % 32;
3500 /* It shouldn't matter what's done here, so fill it with
3501 zero. */
3502 for (; i < elem_bitsize; i += value_bit)
3503 *vp++ = 0;
3505 break;
3507 default:
3508 gcc_unreachable ();
3512 /* Now, pick the right byte to start with. */
3513 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3514 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3515 will already have offset 0. */
3516 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3518 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3519 - byte);
3520 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3521 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3522 byte = (subword_byte % UNITS_PER_WORD
3523 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3526 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3527 so if it's become negative it will instead be very large.) */
3528 gcc_assert (byte < GET_MODE_SIZE (innermode));
3530 /* Convert from bytes to chunks of size value_bit. */
3531 value_start = byte * (BITS_PER_UNIT / value_bit);
3533 /* Re-pack the value. */
3535 if (VECTOR_MODE_P (outermode))
3537 num_elem = GET_MODE_NUNITS (outermode);
3538 result_v = rtvec_alloc (num_elem);
3539 elems = &RTVEC_ELT (result_v, 0);
3540 outer_submode = GET_MODE_INNER (outermode);
3542 else
3544 num_elem = 1;
3545 elems = &result_s;
3546 outer_submode = outermode;
3549 outer_class = GET_MODE_CLASS (outer_submode);
3550 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3552 gcc_assert (elem_bitsize % value_bit == 0);
3553 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3555 for (elem = 0; elem < num_elem; elem++)
3557 unsigned char *vp;
3559 /* Vectors are stored in target memory order. (This is probably
3560 a mistake.) */
3562 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3563 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3564 / BITS_PER_UNIT);
3565 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3566 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3567 unsigned bytele = (subword_byte % UNITS_PER_WORD
3568 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3569 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3572 switch (outer_class)
3574 case MODE_INT:
3575 case MODE_PARTIAL_INT:
3577 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3579 for (i = 0;
3580 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3581 i += value_bit)
3582 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3583 for (; i < elem_bitsize; i += value_bit)
3584 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3585 << (i - HOST_BITS_PER_WIDE_INT));
3587 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3588 know why. */
3589 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3590 elems[elem] = gen_int_mode (lo, outer_submode);
3591 else
3592 elems[elem] = immed_double_const (lo, hi, outer_submode);
3594 break;
3596 case MODE_FLOAT:
3598 REAL_VALUE_TYPE r;
3599 long tmp[max_bitsize / 32];
3601 /* real_from_target wants its input in words affected by
3602 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3603 and use WORDS_BIG_ENDIAN instead; see the documentation
3604 of SUBREG in rtl.texi. */
3605 for (i = 0; i < max_bitsize / 32; i++)
3606 tmp[i] = 0;
3607 for (i = 0; i < elem_bitsize; i += value_bit)
3609 int ibase;
3610 if (WORDS_BIG_ENDIAN)
3611 ibase = elem_bitsize - 1 - i;
3612 else
3613 ibase = i;
3614 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3617 real_from_target (&r, tmp, outer_submode);
3618 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3620 break;
3622 default:
3623 gcc_unreachable ();
3626 if (VECTOR_MODE_P (outermode))
3627 return gen_rtx_CONST_VECTOR (outermode, result_v);
3628 else
3629 return result_s;
3632 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3633 Return 0 if no simplifications are possible. */
3635 simplify_subreg (enum machine_mode outermode, rtx op,
3636 enum machine_mode innermode, unsigned int byte)
3638 /* Little bit of sanity checking. */
3639 gcc_assert (innermode != VOIDmode);
3640 gcc_assert (outermode != VOIDmode);
3641 gcc_assert (innermode != BLKmode);
3642 gcc_assert (outermode != BLKmode);
3644 gcc_assert (GET_MODE (op) == innermode
3645 || GET_MODE (op) == VOIDmode);
3647 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3648 gcc_assert (byte < GET_MODE_SIZE (innermode));
3650 if (outermode == innermode && !byte)
3651 return op;
3653 if (GET_CODE (op) == CONST_INT
3654 || GET_CODE (op) == CONST_DOUBLE
3655 || GET_CODE (op) == CONST_VECTOR)
3656 return simplify_immed_subreg (outermode, op, innermode, byte);
3658 /* Changing mode twice with SUBREG => just change it once,
3659 or not at all if changing back to op's starting mode. */
3660 if (GET_CODE (op) == SUBREG)
3662 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3663 int final_offset = byte + SUBREG_BYTE (op);
3664 rtx newx;
3666 if (outermode == innermostmode
3667 && byte == 0 && SUBREG_BYTE (op) == 0)
3668 return SUBREG_REG (op);
3670 /* The SUBREG_BYTE represents the offset, as if the value were stored
3671 in memory. An irritating exception is the paradoxical subreg, where
3672 we define SUBREG_BYTE to be 0. On big-endian machines, this
3673 value should be negative. For a moment, undo this exception. */
3674 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3676 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3677 if (WORDS_BIG_ENDIAN)
3678 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3679 if (BYTES_BIG_ENDIAN)
3680 final_offset += difference % UNITS_PER_WORD;
3682 if (SUBREG_BYTE (op) == 0
3683 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3685 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3686 if (WORDS_BIG_ENDIAN)
3687 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3688 if (BYTES_BIG_ENDIAN)
3689 final_offset += difference % UNITS_PER_WORD;
3692 /* See whether resulting subreg will be paradoxical. */
3693 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3695 /* In nonparadoxical subregs we can't handle negative offsets. */
3696 if (final_offset < 0)
3697 return NULL_RTX;
3698 /* Bail out in case resulting subreg would be incorrect. */
3699 if (final_offset % GET_MODE_SIZE (outermode)
3700 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3701 return NULL_RTX;
3703 else
3705 int offset = 0;
3706 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3708 /* In a paradoxical subreg, see if we are still looking at the lower
3709 part. If so, our SUBREG_BYTE will be 0. */
3710 if (WORDS_BIG_ENDIAN)
3711 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3712 if (BYTES_BIG_ENDIAN)
3713 offset += difference % UNITS_PER_WORD;
3714 if (offset == final_offset)
3715 final_offset = 0;
3716 else
3717 return NULL_RTX;
3720 /* Recurse for further possible simplifications. */
3721 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3722 final_offset);
3723 if (newx)
3724 return newx;
3725 if (validate_subreg (outermode, innermostmode,
3726 SUBREG_REG (op), final_offset))
3727 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3728 return NULL_RTX;
3731 /* SUBREG of a hard register => just change the register number
3732 and/or mode. If the hard register is not valid in that mode,
3733 suppress this simplification. If the hard register is the stack,
3734 frame, or argument pointer, leave this as a SUBREG. */
3736 if (REG_P (op)
3737 && REGNO (op) < FIRST_PSEUDO_REGISTER
3738 #ifdef CANNOT_CHANGE_MODE_CLASS
3739 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3740 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3741 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3742 #endif
3743 && ((reload_completed && !frame_pointer_needed)
3744 || (REGNO (op) != FRAME_POINTER_REGNUM
3745 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3746 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3747 #endif
3749 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3750 && REGNO (op) != ARG_POINTER_REGNUM
3751 #endif
3752 && REGNO (op) != STACK_POINTER_REGNUM
3753 && subreg_offset_representable_p (REGNO (op), innermode,
3754 byte, outermode))
3756 unsigned int regno = REGNO (op);
3757 unsigned int final_regno
3758 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3760 /* ??? We do allow it if the current REG is not valid for
3761 its mode. This is a kludge to work around how float/complex
3762 arguments are passed on 32-bit SPARC and should be fixed. */
3763 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3764 || ! HARD_REGNO_MODE_OK (regno, innermode))
3766 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3768 /* Propagate original regno. We don't have any way to specify
3769 the offset inside original regno, so do so only for lowpart.
3770 The information is used only by alias analysis, which cannot
3771 grok partial registers anyway. */
3773 if (subreg_lowpart_offset (outermode, innermode) == byte)
3774 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3775 return x;
3779 /* If we have a SUBREG of a register that we are replacing and we are
3780 replacing it with a MEM, make a new MEM and try replacing the
3781 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3782 or if we would be widening it. */
3784 if (MEM_P (op)
3785 && ! mode_dependent_address_p (XEXP (op, 0))
3786 /* Allow splitting of volatile memory references in case we don't
3787 have an instruction to move the whole thing. */
3788 && (! MEM_VOLATILE_P (op)
3789 || ! have_insn_for (SET, innermode))
3790 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3791 return adjust_address_nv (op, outermode, byte);
3793 /* Handle complex values represented as CONCAT
3794 of real and imaginary part. */
3795 if (GET_CODE (op) == CONCAT)
3797 unsigned int inner_size, final_offset;
3798 rtx part, res;
3800 inner_size = GET_MODE_UNIT_SIZE (innermode);
3801 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3802 final_offset = byte % inner_size;
3803 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3804 return NULL_RTX;
3806 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3807 if (res)
3808 return res;
3809 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3810 return gen_rtx_SUBREG (outermode, part, final_offset);
3811 return NULL_RTX;
3814 /* Optimize SUBREG truncations of zero and sign extended values. */
3815 if ((GET_CODE (op) == ZERO_EXTEND
3816 || GET_CODE (op) == SIGN_EXTEND)
3817 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3819 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3821 /* If we're requesting the lowpart of a zero or sign extension,
3822 there are three possibilities. If the outermode is the same
3823 as the origmode, we can omit both the extension and the subreg.
3824 If the outermode is not larger than the origmode, we can apply
3825 the truncation without the extension. Finally, if the outermode
3826 is larger than the origmode, but both are integer modes, we
3827 can just extend to the appropriate mode. */
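/* E.g. on a little-endian target, with X in QImode:
   (subreg:QI (zero_extend:SI X) 0) is simply X, while
   (subreg:HI (zero_extend:SI X) 0) becomes (zero_extend:HI X).  */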
3828 if (bitpos == 0)
3830 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3831 if (outermode == origmode)
3832 return XEXP (op, 0);
3833 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3834 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3835 subreg_lowpart_offset (outermode,
3836 origmode));
3837 if (SCALAR_INT_MODE_P (outermode))
3838 return simplify_gen_unary (GET_CODE (op), outermode,
3839 XEXP (op, 0), origmode);
3842 /* A SUBREG resulting from a zero extension may fold to zero if
3843 it extracts higher bits than the ZERO_EXTEND's source provides. */
3844 if (GET_CODE (op) == ZERO_EXTEND
3845 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3846 return CONST0_RTX (outermode);
3849 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3850 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3851 the outer subreg is effectively a truncation to the original mode. */
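/* E.g. (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)) 0)
   becomes (ashiftrt:QI (x:QI) (const_int 2)): the bits shifted in from
   above are sign-bit copies, exactly what an arithmetic shift in QImode
   produces.  */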
3852 if ((GET_CODE (op) == LSHIFTRT
3853 || GET_CODE (op) == ASHIFTRT)
3854 && SCALAR_INT_MODE_P (outermode)
3855 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3856 to avoid the possibility that an outer LSHIFTRT shifts by more
3857 than the sign extension's sign_bit_copies and introduces zeros
3858 into the high bits of the result. */
3859 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3860 && GET_CODE (XEXP (op, 1)) == CONST_INT
3861 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3862 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3863 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3864 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3865 return simplify_gen_binary (ASHIFTRT, outermode,
3866 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3868 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3869 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3870 the outer subreg is effectively a truncation to the original mode. */
3871 if ((GET_CODE (op) == LSHIFTRT
3872 || GET_CODE (op) == ASHIFTRT)
3873 && SCALAR_INT_MODE_P (outermode)
3874 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3875 && GET_CODE (XEXP (op, 1)) == CONST_INT
3876 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3877 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3878 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3879 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3880 return simplify_gen_binary (LSHIFTRT, outermode,
3881 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3883 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3884 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3885 the outer subreg is effectively a truncation to the original mode. */
3886 if (GET_CODE (op) == ASHIFT
3887 && SCALAR_INT_MODE_P (outermode)
3888 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3889 && GET_CODE (XEXP (op, 1)) == CONST_INT
3890 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3891 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3892 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3893 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3894 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3895 return simplify_gen_binary (ASHIFT, outermode,
3896 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3898 return NULL_RTX;
3901 /* Make a SUBREG operation or equivalent if it folds. */
3904 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3905 enum machine_mode innermode, unsigned int byte)
3907 rtx newx;
3909 newx = simplify_subreg (outermode, op, innermode, byte);
3910 if (newx)
3911 return newx;
3913 if (GET_CODE (op) == SUBREG
3914 || GET_CODE (op) == CONCAT
3915 || GET_MODE (op) == VOIDmode)
3916 return NULL_RTX;
3918 if (validate_subreg (outermode, innermode, op, byte))
3919 return gen_rtx_SUBREG (outermode, op, byte);
3921 return NULL_RTX;
3924 /* Simplify X, an rtx expression.
3926 Return the simplified expression or NULL if no simplifications
3927 were possible.
3929 This is the preferred entry point into the simplification routines;
3930 however, we still allow passes to call the more specific routines.
3932 Right now GCC has three (yes, three) major bodies of RTL simplification
3933 code that need to be unified.
3935 1. fold_rtx in cse.c. This code uses various CSE specific
3936 information to aid in RTL simplification.
3938 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3939 it uses combine specific information to aid in RTL
3940 simplification.
3942 3. The routines in this file.
3945 Long term we want to only have one body of simplification code; to
3946 get to that state I recommend the following steps:
3948 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3949 which are not pass dependent state into these routines.
3951 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3952 use this routine whenever possible.
3954 3. Allow for pass dependent state to be provided to these
3955 routines and add simplifications based on the pass dependent
3956 state. Remove code from cse.c & combine.c that becomes
3957 redundant/dead.
3959 It will take time, but ultimately the compiler will be easier to
3960 maintain and improve. It's totally silly that when we add a
3961 simplification it needs to be added to 4 places (3 for RTL
3962 simplification and 1 for tree simplification). */
3965 simplify_rtx (rtx x)
3967 enum rtx_code code = GET_CODE (x);
3968 enum machine_mode mode = GET_MODE (x);
3970 switch (GET_RTX_CLASS (code))
3972 case RTX_UNARY:
3973 return simplify_unary_operation (code, mode,
3974 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3975 case RTX_COMM_ARITH:
3976 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3977 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3979 /* Fall through.... */
3981 case RTX_BIN_ARITH:
3982 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3984 case RTX_TERNARY:
3985 case RTX_BITFIELD_OPS:
3986 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3987 XEXP (x, 0), XEXP (x, 1),
3988 XEXP (x, 2));
3990 case RTX_COMPARE:
3991 case RTX_COMM_COMPARE:
3992 return simplify_relational_operation (code, mode,
3993 ((GET_MODE (XEXP (x, 0))
3994 != VOIDmode)
3995 ? GET_MODE (XEXP (x, 0))
3996 : GET_MODE (XEXP (x, 1))),
3997 XEXP (x, 0),
3998 XEXP (x, 1));
4000 case RTX_EXTRA:
4001 if (code == SUBREG)
4002 return simplify_gen_subreg (mode, SUBREG_REG (x),
4003 GET_MODE (SUBREG_REG (x)),
4004 SUBREG_BYTE (x));
4005 break;
4007 case RTX_OBJ:
4008 if (code == LO_SUM)
4010 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4011 if (GET_CODE (XEXP (x, 0)) == HIGH
4012 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4013 return XEXP (x, 1);
4015 break;
4017 default:
4018 break;
4020 return NULL;