gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in this file. Until then, do not
49 change these macros without also changing the copy in cse.c. */
51 #define FIXED_BASE_PLUS_P(X) \
52 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
53 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
54 || (X) == virtual_stack_vars_rtx \
55 || (X) == virtual_incoming_args_rtx \
56 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
57 && (XEXP (X, 0) == frame_pointer_rtx \
58 || XEXP (X, 0) == hard_frame_pointer_rtx \
59 || (XEXP (X, 0) == arg_pointer_rtx \
60 && fixed_regs[ARG_POINTER_REGNUM]) \
61 || XEXP (X, 0) == virtual_stack_vars_rtx \
62 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
63 || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P; however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
71 #define NONZERO_BASE_PLUS_P(X) \
72 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
73 || (X) == virtual_stack_vars_rtx \
74 || (X) == virtual_incoming_args_rtx \
75 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
76 && (XEXP (X, 0) == frame_pointer_rtx \
77 || XEXP (X, 0) == hard_frame_pointer_rtx \
78 || (XEXP (X, 0) == arg_pointer_rtx \
79 && fixed_regs[ARG_POINTER_REGNUM]) \
80 || XEXP (X, 0) == virtual_stack_vars_rtx \
81 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
82 || (X) == stack_pointer_rtx \
83 || (X) == virtual_stack_dynamic_rtx \
84 || (X) == virtual_outgoing_args_rtx \
85 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
86 && (XEXP (X, 0) == stack_pointer_rtx \
87 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
88 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
89 || GET_CODE (X) == ADDRESSOF)
91 /* Much code operates on (low, high) pairs; the low value is an
92 unsigned wide int, the high value a signed wide int. We
93 occasionally need to sign extend from low to high as if low were a
94 signed wide int. */
95 #define HWI_SIGN_EXTEND(low) \
96 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
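/* For example, if LOW holds the bit pattern of -5 its sign bit is set,
   so HWI_SIGN_EXTEND yields -1 for the high half; for LOW = 5 it yields
   0. The (low, high) pair then has the value the sign-extended
   double-width integer would have. */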
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
105 /* Negate a CONST_INT rtx, truncating (because a conversion from a
106 maximally negative number can overflow). */
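/* For example, negating (const_int -128) in QImode yields 128, which
   does not fit in the mode; gen_int_mode truncates it back to -128,
   matching the wrap-around the target itself would perform. */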
107 static rtx
108 neg_const_int (mode, i)
109 enum machine_mode mode;
110 rtx i;
112 return gen_int_mode (- INTVAL (i), mode);
116 /* Make a binary operation by properly ordering the operands and
117 seeing if the expression folds. */
119 rtx
120 simplify_gen_binary (code, mode, op0, op1)
121 enum rtx_code code;
122 enum machine_mode mode;
123 rtx op0, op1;
125 rtx tem;
127 /* Put complex operands first and constants second if commutative. */
128 if (GET_RTX_CLASS (code) == 'c'
129 && swap_commutative_operands_p (op0, op1))
130 tem = op0, op0 = op1, op1 = tem;
132 /* If this simplifies, do it. */
133 tem = simplify_binary_operation (code, mode, op0, op1);
134 if (tem)
135 return tem;
137 /* Handle addition and subtraction specially. Otherwise, just form
138 the operation. */
140 if (code == PLUS || code == MINUS)
142 tem = simplify_plus_minus (code, mode, op0, op1, 1);
143 if (tem)
144 return tem;
147 return gen_rtx_fmt_ee (code, mode, op0, op1);
150 /* If X is a MEM referencing the constant pool, return the real value.
151 Otherwise return X. */
152 rtx
153 avoid_constant_pool_reference (x)
154 rtx x;
156 rtx c, addr;
157 enum machine_mode cmode;
159 if (GET_CODE (x) != MEM)
160 return x;
161 addr = XEXP (x, 0);
163 if (GET_CODE (addr) != SYMBOL_REF
164 || ! CONSTANT_POOL_ADDRESS_P (addr))
165 return x;
167 c = get_pool_constant (addr);
168 cmode = get_pool_mode (addr);
170 /* If we're accessing the constant in a different mode than it was
171 originally stored, attempt to fix that up via subreg simplifications.
172 If that fails we have no choice but to return the original memory. */
173 if (cmode != GET_MODE (x))
175 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
176 return c ? c : x;
179 return c;
182 /* Make a unary operation by first seeing if it folds and otherwise making
183 the specified operation. */
185 rtx
186 simplify_gen_unary (code, mode, op, op_mode)
187 enum rtx_code code;
188 enum machine_mode mode;
189 rtx op;
190 enum machine_mode op_mode;
192 rtx tem;
194 /* If this simplifies, use it. */
195 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
196 return tem;
198 return gen_rtx_fmt_e (code, mode, op);
201 /* Likewise for ternary operations. */
203 rtx
204 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
205 enum rtx_code code;
206 enum machine_mode mode, op0_mode;
207 rtx op0, op1, op2;
209 rtx tem;
211 /* If this simplifies, use it. */
212 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
213 op0, op1, op2)))
214 return tem;
216 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
219 /* Likewise, for relational operations.
220 CMP_MODE specifies mode comparison is done in. */
223 rtx
224 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
225 enum rtx_code code;
226 enum machine_mode mode;
227 enum machine_mode cmp_mode;
228 rtx op0, op1;
230 rtx tem;
232 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
233 return tem;
235 /* For the following tests, ensure const0_rtx is op1. */
236 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
237 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
239 /* If op0 is a compare, extract the comparison arguments from it. */
240 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
241 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
243 /* If op0 is a comparison, extract the comparison arguments from it. */
244 if (code == NE && op1 == const0_rtx
245 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
246 return op0;
247 else if (code == EQ && op1 == const0_rtx)
249 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
250 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
251 if (new != UNKNOWN)
253 code = new;
254 mode = cmp_mode;
255 op1 = XEXP (op0, 1);
256 op0 = XEXP (op0, 0);
260 /* Put complex operands first and constants second. */
261 if (swap_commutative_operands_p (op0, op1))
262 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
264 return gen_rtx_fmt_ee (code, mode, op0, op1);
267 /* Replace all occurrences of OLD in X with NEW and try to simplify the
268 resulting RTX. Return a new RTX which is as simplified as possible. */
271 simplify_replace_rtx (x, old, new)
272 rtx x;
273 rtx old;
274 rtx new;
276 enum rtx_code code = GET_CODE (x);
277 enum machine_mode mode = GET_MODE (x);
279 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
280 to build a new expression substituting recursively. If we can't do
281 anything, return our input. */
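/* For instance, replacing (reg R) by (const_int 4) in
   (plus (reg R) (const_int 2)) does not merely substitute; the
   rebuilt expression folds all the way to (const_int 6). */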
283 if (x == old)
284 return new;
286 switch (GET_RTX_CLASS (code))
288 case '1':
290 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
291 rtx op = (XEXP (x, 0) == old
292 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
294 return simplify_gen_unary (code, mode, op, op_mode);
297 case '2':
298 case 'c':
299 return
300 simplify_gen_binary (code, mode,
301 simplify_replace_rtx (XEXP (x, 0), old, new),
302 simplify_replace_rtx (XEXP (x, 1), old, new));
303 case '<':
305 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
306 ? GET_MODE (XEXP (x, 0))
307 : GET_MODE (XEXP (x, 1)));
308 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
309 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
311 return
312 simplify_gen_relational (code, mode,
313 (op_mode != VOIDmode
314 ? op_mode
315 : GET_MODE (op0) != VOIDmode
316 ? GET_MODE (op0)
317 : GET_MODE (op1)),
318 op0, op1);
321 case '3':
322 case 'b':
324 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
325 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
327 return
328 simplify_gen_ternary (code, mode,
329 (op_mode != VOIDmode
330 ? op_mode
331 : GET_MODE (op0)),
332 op0,
333 simplify_replace_rtx (XEXP (x, 1), old, new),
334 simplify_replace_rtx (XEXP (x, 2), old, new));
337 case 'x':
338 /* The only case we try to handle is a SUBREG. */
339 if (code == SUBREG)
341 rtx exp;
342 exp = simplify_gen_subreg (GET_MODE (x),
343 simplify_replace_rtx (SUBREG_REG (x),
344 old, new),
345 GET_MODE (SUBREG_REG (x)),
346 SUBREG_BYTE (x));
347 if (exp)
348 x = exp;
350 return x;
352 case 'o':
353 if (code == MEM)
354 return replace_equiv_address_nv (x,
355 simplify_replace_rtx (XEXP (x, 0),
356 old, new));
358 if (REG_P (x) && REG_P (old) && REGNO (x) == REGNO (old))
359 return new;
361 return x;
363 default:
364 return x;
366 return x;
369 /* Try to simplify a unary operation CODE whose output mode is to be
370 MODE with input operand OP whose mode was originally OP_MODE.
371 Return zero if no simplification can be made. */
372 rtx
373 simplify_unary_operation (code, mode, op, op_mode)
374 enum rtx_code code;
375 enum machine_mode mode;
376 rtx op;
377 enum machine_mode op_mode;
379 unsigned int width = GET_MODE_BITSIZE (mode);
380 rtx trueop = avoid_constant_pool_reference (op);
382 /* The order of these tests is critical so that, for example, we don't
383 check the wrong mode (input vs. output) for a conversion operation,
384 such as FIX. At some point, this should be simplified. */
386 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
387 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
389 HOST_WIDE_INT hv, lv;
390 REAL_VALUE_TYPE d;
392 if (GET_CODE (trueop) == CONST_INT)
393 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
394 else
395 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
397 REAL_VALUE_FROM_INT (d, lv, hv, mode);
398 d = real_value_truncate (mode, d);
399 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
401 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
402 && (GET_CODE (trueop) == CONST_DOUBLE
403 || GET_CODE (trueop) == CONST_INT))
405 HOST_WIDE_INT hv, lv;
406 REAL_VALUE_TYPE d;
408 if (GET_CODE (trueop) == CONST_INT)
409 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
410 else
411 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
413 if (op_mode == VOIDmode)
415 /* We don't know how to interpret negative-looking numbers in
416 this case, so don't try to fold those. */
417 if (hv < 0)
418 return 0;
420 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
421 ;
422 else
423 hv = 0, lv &= GET_MODE_MASK (op_mode);
425 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
426 d = real_value_truncate (mode, d);
427 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
430 if (GET_CODE (trueop) == CONST_INT
431 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
433 HOST_WIDE_INT arg0 = INTVAL (trueop);
434 HOST_WIDE_INT val;
436 switch (code)
438 case NOT:
439 val = ~ arg0;
440 break;
442 case NEG:
443 val = - arg0;
444 break;
446 case ABS:
447 val = (arg0 >= 0 ? arg0 : - arg0);
448 break;
450 case FFS:
451 /* Don't use ffs here. Instead, get low order bit and then its
452 number. If arg0 is zero, this will return 0, as desired. */
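/* E.g. for arg0 == 40 (binary 101000), arg0 & -arg0 isolates the
   lowest set bit (8), exact_log2 gives 3, and adding 1 yields the
   one-based bit number 4, which is what ffs would return. */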
453 arg0 &= GET_MODE_MASK (mode);
454 val = exact_log2 (arg0 & (- arg0)) + 1;
455 break;
457 case TRUNCATE:
458 val = arg0;
459 break;
461 case ZERO_EXTEND:
462 /* When zero-extending a CONST_INT, we need to know its
463 original mode. */
464 if (op_mode == VOIDmode)
465 abort ();
466 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
468 /* If we were really extending the mode,
469 we would have to distinguish between zero-extension
470 and sign-extension. */
471 if (width != GET_MODE_BITSIZE (op_mode))
472 abort ();
473 val = arg0;
475 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
476 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
477 else
478 return 0;
479 break;
481 case SIGN_EXTEND:
482 if (op_mode == VOIDmode)
483 op_mode = mode;
484 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
486 /* If we were really extending the mode,
487 we would have to distinguish between zero-extension
488 and sign-extension. */
489 if (width != GET_MODE_BITSIZE (op_mode))
490 abort ();
491 val = arg0;
493 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
495 val
496 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
497 if (val
498 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
499 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
501 else
502 return 0;
503 break;
505 case SQRT:
506 case FLOAT_EXTEND:
507 case FLOAT_TRUNCATE:
508 case SS_TRUNCATE:
509 case US_TRUNCATE:
510 return 0;
512 default:
513 abort ();
516 val = trunc_int_for_mode (val, mode);
518 return GEN_INT (val);
521 /* We can do some operations on integer CONST_DOUBLEs. Also allow
522 for a DImode operation on a CONST_INT. */
523 else if (GET_MODE (trueop) == VOIDmode
524 && width <= HOST_BITS_PER_WIDE_INT * 2
525 && (GET_CODE (trueop) == CONST_DOUBLE
526 || GET_CODE (trueop) == CONST_INT))
528 unsigned HOST_WIDE_INT l1, lv;
529 HOST_WIDE_INT h1, hv;
531 if (GET_CODE (trueop) == CONST_DOUBLE)
532 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
533 else
534 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
536 switch (code)
538 case NOT:
539 lv = ~ l1;
540 hv = ~ h1;
541 break;
543 case NEG:
544 neg_double (l1, h1, &lv, &hv);
545 break;
547 case ABS:
548 if (h1 < 0)
549 neg_double (l1, h1, &lv, &hv);
550 else
551 lv = l1, hv = h1;
552 break;
554 case FFS:
555 hv = 0;
556 if (l1 == 0)
557 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
558 else
559 lv = exact_log2 (l1 & (-l1)) + 1;
560 break;
562 case TRUNCATE:
563 /* This is just a change-of-mode, so do nothing. */
564 lv = l1, hv = h1;
565 break;
567 case ZERO_EXTEND:
568 if (op_mode == VOIDmode)
569 abort ();
571 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
572 return 0;
574 hv = 0;
575 lv = l1 & GET_MODE_MASK (op_mode);
576 break;
578 case SIGN_EXTEND:
579 if (op_mode == VOIDmode
580 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
581 return 0;
582 else
584 lv = l1 & GET_MODE_MASK (op_mode);
585 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
586 && (lv & ((HOST_WIDE_INT) 1
587 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
588 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
590 hv = HWI_SIGN_EXTEND (lv);
592 break;
594 case SQRT:
595 return 0;
597 default:
598 return 0;
601 return immed_double_const (lv, hv, mode);
604 else if (GET_CODE (trueop) == CONST_DOUBLE
605 && GET_MODE_CLASS (mode) == MODE_FLOAT)
607 REAL_VALUE_TYPE d;
608 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
610 switch (code)
612 case SQRT:
613 /* We don't attempt to optimize this. */
614 return 0;
616 case ABS: d = REAL_VALUE_ABS (d); break;
617 case NEG: d = REAL_VALUE_NEGATE (d); break;
618 case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break;
619 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
620 case FIX: d = REAL_VALUE_RNDZINT (d); break;
621 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
622 default:
623 abort ();
625 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
628 else if (GET_CODE (trueop) == CONST_DOUBLE
629 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
630 && GET_MODE_CLASS (mode) == MODE_INT
631 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
633 HOST_WIDE_INT i;
634 REAL_VALUE_TYPE d;
635 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
636 switch (code)
638 case FIX: i = REAL_VALUE_FIX (d); break;
639 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
640 default:
641 abort ();
643 return gen_int_mode (i, mode);
646 /* This was formerly used only for non-IEEE float.
647 eggert@twinsun.com says it is safe for IEEE also. */
648 else
650 enum rtx_code reversed;
651 /* There are some simplifications we can do even if the operands
652 aren't constant. */
653 switch (code)
655 case NOT:
656 /* (not (not X)) == X. */
657 if (GET_CODE (op) == NOT)
658 return XEXP (op, 0);
660 /* (not (eq X Y)) == (ne X Y), etc. */
661 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
662 && ((reversed = reversed_comparison_code (op, NULL_RTX))
663 != UNKNOWN))
664 return gen_rtx_fmt_ee (reversed,
665 op_mode, XEXP (op, 0), XEXP (op, 1));
666 break;
668 case NEG:
669 /* (neg (neg X)) == X. */
670 if (GET_CODE (op) == NEG)
671 return XEXP (op, 0);
672 break;
674 case SIGN_EXTEND:
675 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
676 becomes just the MINUS if its mode is MODE. This allows
677 folding switch statements on machines using casesi (such as
678 the VAX). */
679 if (GET_CODE (op) == TRUNCATE
680 && GET_MODE (XEXP (op, 0)) == mode
681 && GET_CODE (XEXP (op, 0)) == MINUS
682 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
683 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
684 return XEXP (op, 0);
686 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
687 if (! POINTERS_EXTEND_UNSIGNED
688 && mode == Pmode && GET_MODE (op) == ptr_mode
689 && (CONSTANT_P (op)
690 || (GET_CODE (op) == SUBREG
691 && GET_CODE (SUBREG_REG (op)) == REG
692 && REG_POINTER (SUBREG_REG (op))
693 && GET_MODE (SUBREG_REG (op)) == Pmode)))
694 return convert_memory_address (Pmode, op);
695 #endif
696 break;
698 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
699 case ZERO_EXTEND:
700 if (POINTERS_EXTEND_UNSIGNED > 0
701 && mode == Pmode && GET_MODE (op) == ptr_mode
702 && (CONSTANT_P (op)
703 || (GET_CODE (op) == SUBREG
704 && GET_CODE (SUBREG_REG (op)) == REG
705 && REG_POINTER (SUBREG_REG (op))
706 && GET_MODE (SUBREG_REG (op)) == Pmode)))
707 return convert_memory_address (Pmode, op);
708 break;
709 #endif
711 default:
712 break;
715 return 0;
719 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
720 and OP1. Return 0 if no simplification is possible.
722 Don't use this for relational operations such as EQ or LT.
723 Use simplify_relational_operation instead. */
724 rtx
725 simplify_binary_operation (code, mode, op0, op1)
726 enum rtx_code code;
727 enum machine_mode mode;
728 rtx op0, op1;
730 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
731 HOST_WIDE_INT val;
732 unsigned int width = GET_MODE_BITSIZE (mode);
733 rtx tem;
734 rtx trueop0 = avoid_constant_pool_reference (op0);
735 rtx trueop1 = avoid_constant_pool_reference (op1);
737 /* Relational operations don't work here. We must know the mode
738 of the operands in order to do the comparison correctly.
739 Assuming a full word can give incorrect results.
740 Consider comparing 128 with -128 in QImode. */
742 if (GET_RTX_CLASS (code) == '<')
743 abort ();
745 /* Make sure the constant is second. */
746 if (GET_RTX_CLASS (code) == 'c'
747 && swap_commutative_operands_p (trueop0, trueop1))
749 tem = op0, op0 = op1, op1 = tem;
750 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
753 if (GET_MODE_CLASS (mode) == MODE_FLOAT
754 && GET_CODE (trueop0) == CONST_DOUBLE
755 && GET_CODE (trueop1) == CONST_DOUBLE
756 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
758 REAL_VALUE_TYPE f0, f1, value;
760 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
761 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
762 f0 = real_value_truncate (mode, f0);
763 f1 = real_value_truncate (mode, f1);
765 if (code == DIV
766 && !MODE_HAS_INFINITIES (mode)
767 && REAL_VALUES_EQUAL (f1, dconst0))
768 return 0;
770 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
772 value = real_value_truncate (mode, value);
773 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
776 /* We can fold some multi-word operations. */
777 if (GET_MODE_CLASS (mode) == MODE_INT
778 && width == HOST_BITS_PER_WIDE_INT * 2
779 && (GET_CODE (trueop0) == CONST_DOUBLE
780 || GET_CODE (trueop0) == CONST_INT)
781 && (GET_CODE (trueop1) == CONST_DOUBLE
782 || GET_CODE (trueop1) == CONST_INT))
784 unsigned HOST_WIDE_INT l1, l2, lv;
785 HOST_WIDE_INT h1, h2, hv;
787 if (GET_CODE (trueop0) == CONST_DOUBLE)
788 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
789 else
790 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
792 if (GET_CODE (trueop1) == CONST_DOUBLE)
793 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
794 else
795 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
797 switch (code)
799 case MINUS:
800 /* A - B == A + (-B). */
801 neg_double (l2, h2, &lv, &hv);
802 l2 = lv, h2 = hv;
804 /* ... fall through ... */
806 case PLUS:
807 add_double (l1, h1, l2, h2, &lv, &hv);
808 break;
810 case MULT:
811 mul_double (l1, h1, l2, h2, &lv, &hv);
812 break;
814 case DIV: case MOD: case UDIV: case UMOD:
815 /* We'd need to include tree.h to do this and it doesn't seem worth
816 it. */
817 return 0;
819 case AND:
820 lv = l1 & l2, hv = h1 & h2;
821 break;
823 case IOR:
824 lv = l1 | l2, hv = h1 | h2;
825 break;
827 case XOR:
828 lv = l1 ^ l2, hv = h1 ^ h2;
829 break;
831 case SMIN:
832 if (h1 < h2
833 || (h1 == h2
834 && ((unsigned HOST_WIDE_INT) l1
835 < (unsigned HOST_WIDE_INT) l2)))
836 lv = l1, hv = h1;
837 else
838 lv = l2, hv = h2;
839 break;
841 case SMAX:
842 if (h1 > h2
843 || (h1 == h2
844 && ((unsigned HOST_WIDE_INT) l1
845 > (unsigned HOST_WIDE_INT) l2)))
846 lv = l1, hv = h1;
847 else
848 lv = l2, hv = h2;
849 break;
851 case UMIN:
852 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
853 || (h1 == h2
854 && ((unsigned HOST_WIDE_INT) l1
855 < (unsigned HOST_WIDE_INT) l2)))
856 lv = l1, hv = h1;
857 else
858 lv = l2, hv = h2;
859 break;
861 case UMAX:
862 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
863 || (h1 == h2
864 && ((unsigned HOST_WIDE_INT) l1
865 > (unsigned HOST_WIDE_INT) l2)))
866 lv = l1, hv = h1;
867 else
868 lv = l2, hv = h2;
869 break;
871 case LSHIFTRT: case ASHIFTRT:
872 case ASHIFT:
873 case ROTATE: case ROTATERT:
874 #ifdef SHIFT_COUNT_TRUNCATED
875 if (SHIFT_COUNT_TRUNCATED)
876 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
877 #endif
879 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
880 return 0;
882 if (code == LSHIFTRT || code == ASHIFTRT)
883 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
884 code == ASHIFTRT);
885 else if (code == ASHIFT)
886 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
887 else if (code == ROTATE)
888 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
889 else /* code == ROTATERT */
890 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
891 break;
893 default:
894 return 0;
897 return immed_double_const (lv, hv, mode);
900 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
901 || width > HOST_BITS_PER_WIDE_INT || width == 0)
903 /* Even if we can't compute a constant result,
904 there are some cases worth simplifying. */
906 switch (code)
908 case PLUS:
909 /* Maybe simplify x + 0 to x. The two expressions are equivalent
910 when x is NaN, infinite, or finite and non-zero. They aren't
911 when x is -0 and the rounding mode is not towards -infinity,
912 since (-0) + 0 is then 0. */
913 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
914 return op0;
916 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
917 transformations are safe even for IEEE. */
918 if (GET_CODE (op0) == NEG)
919 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
920 else if (GET_CODE (op1) == NEG)
921 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
923 /* (~a) + 1 -> -a */
924 if (INTEGRAL_MODE_P (mode)
925 && GET_CODE (op0) == NOT
926 && trueop1 == const1_rtx)
927 return gen_rtx_NEG (mode, XEXP (op0, 0));
929 /* Handle both-operands-constant cases. We can only add
930 CONST_INTs to constants since the sum of relocatable symbols
931 can't be handled by most assemblers. Don't add CONST_INT
932 to CONST_INT since overflow won't be computed properly if wider
933 than HOST_BITS_PER_WIDE_INT. */
935 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
936 && GET_CODE (op1) == CONST_INT)
937 return plus_constant (op0, INTVAL (op1));
938 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
939 && GET_CODE (op0) == CONST_INT)
940 return plus_constant (op1, INTVAL (op0));
942 /* See if this is something like X * C - X or vice versa or
943 if the multiplication is written as a shift. If so, we can
944 distribute and make a new multiply, shift, or maybe just
945 have X (if C is 2 in the example above). But don't make
946 real multiply if we didn't have one before. */
948 if (! FLOAT_MODE_P (mode))
950 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
951 rtx lhs = op0, rhs = op1;
952 int had_mult = 0;
954 if (GET_CODE (lhs) == NEG)
955 coeff0 = -1, lhs = XEXP (lhs, 0);
956 else if (GET_CODE (lhs) == MULT
957 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
959 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
960 had_mult = 1;
962 else if (GET_CODE (lhs) == ASHIFT
963 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
964 && INTVAL (XEXP (lhs, 1)) >= 0
965 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
967 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
968 lhs = XEXP (lhs, 0);
971 if (GET_CODE (rhs) == NEG)
972 coeff1 = -1, rhs = XEXP (rhs, 0);
973 else if (GET_CODE (rhs) == MULT
974 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
976 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
977 had_mult = 1;
979 else if (GET_CODE (rhs) == ASHIFT
980 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
981 && INTVAL (XEXP (rhs, 1)) >= 0
982 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
984 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
985 rhs = XEXP (rhs, 0);
988 if (rtx_equal_p (lhs, rhs))
990 tem = simplify_gen_binary (MULT, mode, lhs,
991 GEN_INT (coeff0 + coeff1));
992 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
996 /* If one of the operands is a PLUS or a MINUS, see if we can
997 simplify this by the associative law.
998 Don't use the associative law for floating point.
999 The inaccuracy makes it nonassociative,
1000 and subtle programs can break if operations are associated. */
1002 if (INTEGRAL_MODE_P (mode)
1003 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1004 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1005 || (GET_CODE (op0) == CONST
1006 && GET_CODE (XEXP (op0, 0)) == PLUS)
1007 || (GET_CODE (op1) == CONST
1008 && GET_CODE (XEXP (op1, 0)) == PLUS))
1009 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1010 return tem;
1011 break;
1013 case COMPARE:
1014 #ifdef HAVE_cc0
1015 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1016 using cc0, in which case we want to leave it as a COMPARE
1017 so we can distinguish it from a register-register-copy.
1019 In IEEE floating point, x-0 is not the same as x. */
1021 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1022 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1023 && trueop1 == CONST0_RTX (mode))
1024 return op0;
1025 #endif
1027 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1028 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1029 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1030 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1032 rtx xop00 = XEXP (op0, 0);
1033 rtx xop10 = XEXP (op1, 0);
1035 #ifdef HAVE_cc0
1036 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1037 #else
1038 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1039 && GET_MODE (xop00) == GET_MODE (xop10)
1040 && REGNO (xop00) == REGNO (xop10)
1041 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1042 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1043 #endif
1044 return xop00;
1046 break;
1048 case MINUS:
1049 /* We can't assume x-x is 0 even with non-IEEE floating point,
1050 but since it is zero except in very strange circumstances, we
1051 will treat it as zero with -funsafe-math-optimizations. */
1052 if (rtx_equal_p (trueop0, trueop1)
1053 && ! side_effects_p (op0)
1054 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1055 return CONST0_RTX (mode);
1057 /* Change subtraction from zero into negation. (0 - x) is the
1058 same as -x when x is NaN, infinite, or finite and non-zero.
1059 But if the mode has signed zeros, and does not round towards
1060 -infinity, then 0 - 0 is 0, not -0. */
1061 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1062 return gen_rtx_NEG (mode, op1);
1064 /* (-1 - a) is ~a. */
1065 if (trueop0 == constm1_rtx)
1066 return gen_rtx_NOT (mode, op1);
1068 /* Subtracting 0 has no effect unless the mode has signed zeros
1069 and supports rounding towards -infinity. In such a case,
1070 0 - 0 is -0. */
1071 if (!(HONOR_SIGNED_ZEROS (mode)
1072 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1073 && trueop1 == CONST0_RTX (mode))
1074 return op0;
1076 /* See if this is something like X * C - X or vice versa or
1077 if the multiplication is written as a shift. If so, we can
1078 distribute and make a new multiply, shift, or maybe just
1079 have X (if C is 2 in the example above). But don't make
1080 real multiply if we didn't have one before. */
1082 if (! FLOAT_MODE_P (mode))
1084 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1085 rtx lhs = op0, rhs = op1;
1086 int had_mult = 0;
1088 if (GET_CODE (lhs) == NEG)
1089 coeff0 = -1, lhs = XEXP (lhs, 0);
1090 else if (GET_CODE (lhs) == MULT
1091 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1093 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1094 had_mult = 1;
1096 else if (GET_CODE (lhs) == ASHIFT
1097 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1098 && INTVAL (XEXP (lhs, 1)) >= 0
1099 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1101 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1102 lhs = XEXP (lhs, 0);
1105 if (GET_CODE (rhs) == NEG)
1106 coeff1 = - 1, rhs = XEXP (rhs, 0);
1107 else if (GET_CODE (rhs) == MULT
1108 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1110 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1111 had_mult = 1;
1113 else if (GET_CODE (rhs) == ASHIFT
1114 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1115 && INTVAL (XEXP (rhs, 1)) >= 0
1116 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1118 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1119 rhs = XEXP (rhs, 0);
1122 if (rtx_equal_p (lhs, rhs))
1124 tem = simplify_gen_binary (MULT, mode, lhs,
1125 GEN_INT (coeff0 - coeff1));
1126 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1130 /* (a - (-b)) -> (a + b). True even for IEEE. */
1131 if (GET_CODE (op1) == NEG)
1132 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1134 /* If one of the operands is a PLUS or a MINUS, see if we can
1135 simplify this by the associative law.
1136 Don't use the associative law for floating point.
1137 The inaccuracy makes it nonassociative,
1138 and subtle programs can break if operations are associated. */
1140 if (INTEGRAL_MODE_P (mode)
1141 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1142 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1143 || (GET_CODE (op0) == CONST
1144 && GET_CODE (XEXP (op0, 0)) == PLUS)
1145 || (GET_CODE (op1) == CONST
1146 && GET_CODE (XEXP (op1, 0)) == PLUS))
1147 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1148 return tem;
1150 /* Don't let a relocatable value get a negative coeff. */
1151 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1152 return simplify_gen_binary (PLUS, mode,
1153 op0,
1154 neg_const_int (mode, op1));
1156 /* (x - (x & y)) -> (x & ~y) */
1157 if (GET_CODE (op1) == AND)
1159 if (rtx_equal_p (op0, XEXP (op1, 0)))
1160 return simplify_gen_binary (AND, mode, op0,
1161 gen_rtx_NOT (mode, XEXP (op1, 1)));
1162 if (rtx_equal_p (op0, XEXP (op1, 1)))
1163 return simplify_gen_binary (AND, mode, op0,
1164 gen_rtx_NOT (mode, XEXP (op1, 0)));
1166 break;
1168 case MULT:
1169 if (trueop1 == constm1_rtx)
1171 tem = simplify_unary_operation (NEG, mode, op0, mode);
1173 return tem ? tem : gen_rtx_NEG (mode, op0);
1176 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1177 x is NaN, since x * 0 is then also NaN. Nor is it valid
1178 when the mode has signed zeros, since multiplying a negative
1179 number by 0 will give -0, not 0. */
1180 if (!HONOR_NANS (mode)
1181 && !HONOR_SIGNED_ZEROS (mode)
1182 && trueop1 == CONST0_RTX (mode)
1183 && ! side_effects_p (op0))
1184 return op1;
1186 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1187 However, ANSI says we can drop signals,
1188 so we can do this anyway. */
1189 if (trueop1 == CONST1_RTX (mode))
1190 return op0;
1192 /* Convert multiply by constant power of two into shift unless
1193 we are still generating RTL. This test is a kludge. */
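/* E.g. x * 8 becomes (ashift x 3), since 8 == 1 << 3. */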
1194 if (GET_CODE (trueop1) == CONST_INT
1195 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1196 /* If the mode is larger than the host word size, and the
1197 uppermost bit is set, then this isn't a power of two due
1198 to implicit sign extension. */
1199 && (width <= HOST_BITS_PER_WIDE_INT
1200 || val != HOST_BITS_PER_WIDE_INT - 1)
1201 && ! rtx_equal_function_value_matters)
1202 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1204 /* x*2 is x+x and x*(-1) is -x */
1205 if (GET_CODE (trueop1) == CONST_DOUBLE
1206 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1207 && GET_MODE (op0) == mode)
1209 REAL_VALUE_TYPE d;
1210 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1212 if (REAL_VALUES_EQUAL (d, dconst2))
1213 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1215 if (REAL_VALUES_EQUAL (d, dconstm1))
1216 return gen_rtx_NEG (mode, op0);
1218 break;
1220 case IOR:
1221 if (trueop1 == const0_rtx)
1222 return op0;
1223 if (GET_CODE (trueop1) == CONST_INT
1224 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1225 == GET_MODE_MASK (mode)))
1226 return op1;
1227 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1228 return op0;
1229 /* A | (~A) -> -1 */
1230 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1231 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1232 && ! side_effects_p (op0)
1233 && GET_MODE_CLASS (mode) != MODE_CC)
1234 return constm1_rtx;
1235 break;
1237 case XOR:
1238 if (trueop1 == const0_rtx)
1239 return op0;
1240 if (GET_CODE (trueop1) == CONST_INT
1241 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1242 == GET_MODE_MASK (mode)))
1243 return gen_rtx_NOT (mode, op0);
1244 if (trueop0 == trueop1 && ! side_effects_p (op0)
1245 && GET_MODE_CLASS (mode) != MODE_CC)
1246 return const0_rtx;
1247 break;
1249 case AND:
1250 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1251 return const0_rtx;
1252 if (GET_CODE (trueop1) == CONST_INT
1253 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1254 == GET_MODE_MASK (mode)))
1255 return op0;
1256 if (trueop0 == trueop1 && ! side_effects_p (op0)
1257 && GET_MODE_CLASS (mode) != MODE_CC)
1258 return op0;
1259 /* A & (~A) -> 0 */
1260 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1261 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1262 && ! side_effects_p (op0)
1263 && GET_MODE_CLASS (mode) != MODE_CC)
1264 return const0_rtx;
1265 break;
1267 case UDIV:
1268 /* Convert divide by power of two into shift (divide by 1 handled
1269 below). */
1270 if (GET_CODE (trueop1) == CONST_INT
1271 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1272 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1274 /* ... fall through ... */
1276 case DIV:
1277 if (trueop1 == CONST1_RTX (mode))
1279 /* On some platforms DIV uses narrower mode than its
1280 operands. */
1281 rtx x = gen_lowpart_common (mode, op0);
1282 if (x)
1283 return x;
1284 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1285 return gen_lowpart_SUBREG (mode, op0);
1286 else
1287 return op0;
1290 /* Maybe change 0 / x to 0. This transformation isn't safe for
1291 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1292 Nor is it safe for modes with signed zeros, since dividing
1293 0 by a negative number gives -0, not 0. */
1294 if (!HONOR_NANS (mode)
1295 && !HONOR_SIGNED_ZEROS (mode)
1296 && trueop0 == CONST0_RTX (mode)
1297 && ! side_effects_p (op1))
1298 return op0;
1300 /* Change division by a constant into multiplication. Only do
1301 this with -funsafe-math-optimizations. */
1302 else if (GET_CODE (trueop1) == CONST_DOUBLE
1303 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1304 && trueop1 != CONST0_RTX (mode)
1305 && flag_unsafe_math_optimizations)
1307 REAL_VALUE_TYPE d;
1308 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1310 if (! REAL_VALUES_EQUAL (d, dconst0))
1312 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1313 return gen_rtx_MULT (mode, op0,
1314 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1317 break;
1319 case UMOD:
1320 /* Handle modulus by power of two (mod with 1 handled below). */
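/* E.g. an unsigned x % 8 becomes (and x 7). */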
1321 if (GET_CODE (trueop1) == CONST_INT
1322 && exact_log2 (INTVAL (trueop1)) > 0)
1323 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1325 /* ... fall through ... */
1327 case MOD:
1328 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1329 && ! side_effects_p (op0) && ! side_effects_p (op1))
1330 return const0_rtx;
1331 break;
1333 case ROTATERT:
1334 case ROTATE:
1335 /* Rotating ~0 always results in ~0. */
1336 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1337 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1338 && ! side_effects_p (op1))
1339 return op0;
1341 /* ... fall through ... */
1343 case ASHIFT:
1344 case ASHIFTRT:
1345 case LSHIFTRT:
1346 if (trueop1 == const0_rtx)
1347 return op0;
1348 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1349 return op0;
1350 break;
1352 case SMIN:
1353 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1354 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1355 && ! side_effects_p (op0))
1356 return op1;
1357 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1358 return op0;
1359 break;
1361 case SMAX:
1362 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1363 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1364 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1365 && ! side_effects_p (op0))
1366 return op1;
1367 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1368 return op0;
1369 break;
1371 case UMIN:
1372 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1373 return op1;
1374 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1375 return op0;
1376 break;
1378 case UMAX:
1379 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1380 return op1;
1381 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1382 return op0;
1383 break;
1385 case SS_PLUS:
1386 case US_PLUS:
1387 case SS_MINUS:
1388 case US_MINUS:
1389 /* ??? There are simplifications that can be done. */
1390 return 0;
1392 default:
1393 abort ();
1396 return 0;
1399 /* Get the integer argument values in two forms:
1400 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
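/* E.g. in QImode (width 8) an all-ones operand is 0xff when viewed
   as ARG0 but -1 when viewed as ARG0S. */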
1402 arg0 = INTVAL (trueop0);
1403 arg1 = INTVAL (trueop1);
1405 if (width < HOST_BITS_PER_WIDE_INT)
1407 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1408 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1410 arg0s = arg0;
1411 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1412 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1414 arg1s = arg1;
1415 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1416 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1418 else
1420 arg0s = arg0;
1421 arg1s = arg1;
1424 /* Compute the value of the arithmetic. */
1426 switch (code)
1428 case PLUS:
1429 val = arg0s + arg1s;
1430 break;
1432 case MINUS:
1433 val = arg0s - arg1s;
1434 break;
1436 case MULT:
1437 val = arg0s * arg1s;
1438 break;
1440 case DIV:
1441 if (arg1s == 0
1442 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1443 && arg1s == -1))
1444 return 0;
1445 val = arg0s / arg1s;
1446 break;
1448 case MOD:
1449 if (arg1s == 0
1450 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1451 && arg1s == -1))
1452 return 0;
1453 val = arg0s % arg1s;
1454 break;
1456 case UDIV:
1457 if (arg1 == 0
1458 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1459 && arg1s == -1))
1460 return 0;
1461 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1462 break;
1464 case UMOD:
1465 if (arg1 == 0
1466 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1467 && arg1s == -1))
1468 return 0;
1469 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1470 break;
1472 case AND:
1473 val = arg0 & arg1;
1474 break;
1476 case IOR:
1477 val = arg0 | arg1;
1478 break;
1480 case XOR:
1481 val = arg0 ^ arg1;
1482 break;
1484 case LSHIFTRT:
1485 /* If shift count is undefined, don't fold it; let the machine do
1486 what it wants. But truncate it if the machine will do that. */
1487 if (arg1 < 0)
1488 return 0;
1490 #ifdef SHIFT_COUNT_TRUNCATED
1491 if (SHIFT_COUNT_TRUNCATED)
1492 arg1 %= width;
1493 #endif
1495 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1496 break;
1498 case ASHIFT:
1499 if (arg1 < 0)
1500 return 0;
1502 #ifdef SHIFT_COUNT_TRUNCATED
1503 if (SHIFT_COUNT_TRUNCATED)
1504 arg1 %= width;
1505 #endif
1507 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1508 break;
1510 case ASHIFTRT:
1511 if (arg1 < 0)
1512 return 0;
1514 #ifdef SHIFT_COUNT_TRUNCATED
1515 if (SHIFT_COUNT_TRUNCATED)
1516 arg1 %= width;
1517 #endif
1519 val = arg0s >> arg1;
1521 /* Bootstrap compiler may not have sign extended the right shift.
1522 Manually extend the sign to ensure bootstrap cc matches gcc. */
1523 if (arg0s < 0 && arg1 > 0)
1524 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1526 break;
1528 case ROTATERT:
1529 if (arg1 < 0)
1530 return 0;
1532 arg1 %= width;
1533 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1534 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1535 break;
1537 case ROTATE:
1538 if (arg1 < 0)
1539 return 0;
1541 arg1 %= width;
1542 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1543 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1544 break;
1546 case COMPARE:
1547 /* Do nothing here. */
1548 return 0;
1550 case SMIN:
1551 val = arg0s <= arg1s ? arg0s : arg1s;
1552 break;
1554 case UMIN:
1555 val = ((unsigned HOST_WIDE_INT) arg0
1556 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1557 break;
1559 case SMAX:
1560 val = arg0s > arg1s ? arg0s : arg1s;
1561 break;
1563 case UMAX:
1564 val = ((unsigned HOST_WIDE_INT) arg0
1565 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1566 break;
1568 default:
1569 abort ();
1572 val = trunc_int_for_mode (val, mode);
1574 return GEN_INT (val);
1577 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1578 PLUS or MINUS.
1580 Rather than test for specific cases, we do this by a brute-force method
1581 and do all possible simplifications until no more changes occur. Then
1582 we rebuild the operation.
1584 If FORCE is true, then always generate the rtx. This is used to
1585 canonicalize stuff emitted from simplify_gen_binary. Note that this
1586 can still fail if the rtx is too complex. It won't fail just because
1587 the result is not 'simpler' than the input, however. */
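/* As a rough example, (a - (b - c)) + (-a) expands to the operand
   list a, -b, c, -a; the a and -a terms combine to zero, and the
   remainder is rebuilt as a single subtraction of b from c. */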
1589 struct simplify_plus_minus_op_data
1591 rtx op;
1592 int neg;
1595 static int
1596 simplify_plus_minus_op_data_cmp (p1, p2)
1597 const void *p1;
1598 const void *p2;
1600 const struct simplify_plus_minus_op_data *d1 = p1;
1601 const struct simplify_plus_minus_op_data *d2 = p2;
1603 return (commutative_operand_precedence (d2->op)
1604 - commutative_operand_precedence (d1->op));
1607 static rtx
1608 simplify_plus_minus (code, mode, op0, op1, force)
1609 enum rtx_code code;
1610 enum machine_mode mode;
1611 rtx op0, op1;
1612 int force;
1614 struct simplify_plus_minus_op_data ops[8];
1615 rtx result, tem;
1616 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1617 int first, negate, changed;
1618 int i, j;
1620 memset ((char *) ops, 0, sizeof ops);
1622 /* Set up the two operands and then expand them until nothing has been
1623 changed. If we run out of room in our array, give up; this should
1624 almost never happen. */
1626 ops[0].op = op0;
1627 ops[0].neg = 0;
1628 ops[1].op = op1;
1629 ops[1].neg = (code == MINUS);
1633 changed = 0;
1635 for (i = 0; i < n_ops; i++)
1637 rtx this_op = ops[i].op;
1638 int this_neg = ops[i].neg;
1639 enum rtx_code this_code = GET_CODE (this_op);
1641 switch (this_code)
1643 case PLUS:
1644 case MINUS:
1645 if (n_ops == 7)
1646 return NULL_RTX;
1648 ops[n_ops].op = XEXP (this_op, 1);
1649 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1650 n_ops++;
1652 ops[i].op = XEXP (this_op, 0);
1653 input_ops++;
1654 changed = 1;
1655 break;
1657 case NEG:
1658 ops[i].op = XEXP (this_op, 0);
1659 ops[i].neg = ! this_neg;
1660 changed = 1;
1661 break;
1663 case CONST:
1664 if (n_ops < 7
1665 && GET_CODE (XEXP (this_op, 0)) == PLUS
1666 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1667 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1669 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1670 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1671 ops[n_ops].neg = this_neg;
1672 n_ops++;
1673 input_consts++;
1674 changed = 1;
1676 break;
1678 case NOT:
1679 /* ~a -> (-a - 1) */
1680 if (n_ops != 7)
1682 ops[n_ops].op = constm1_rtx;
1683 ops[n_ops++].neg = this_neg;
1684 ops[i].op = XEXP (this_op, 0);
1685 ops[i].neg = !this_neg;
1686 changed = 1;
1688 break;
1690 case CONST_INT:
1691 if (this_neg)
1693 ops[i].op = neg_const_int (mode, this_op);
1694 ops[i].neg = 0;
1695 changed = 1;
1697 break;
1699 default:
1700 break;
1704 while (changed);
1706 /* If we only have two operands, we can't do anything. */
1707 if (n_ops <= 2 && !force)
1708 return NULL_RTX;
1710 /* Count the number of CONSTs we didn't split above. */
1711 for (i = 0; i < n_ops; i++)
1712 if (GET_CODE (ops[i].op) == CONST)
1713 input_consts++;
1715 /* Now simplify each pair of operands until nothing changes. The first
1716 time through just simplify constants against each other. */
1718 first = 1;
1721 changed = first;
1723 for (i = 0; i < n_ops - 1; i++)
1724 for (j = i + 1; j < n_ops; j++)
1726 rtx lhs = ops[i].op, rhs = ops[j].op;
1727 int lneg = ops[i].neg, rneg = ops[j].neg;
1729 if (lhs != 0 && rhs != 0
1730 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1732 enum rtx_code ncode = PLUS;
1734 if (lneg != rneg)
1736 ncode = MINUS;
1737 if (lneg)
1738 tem = lhs, lhs = rhs, rhs = tem;
1740 else if (swap_commutative_operands_p (lhs, rhs))
1741 tem = lhs, lhs = rhs, rhs = tem;
1743 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1745 /* Reject "simplifications" that just wrap the two
1746 arguments in a CONST. Failure to do so can result
1747 in infinite recursion with simplify_binary_operation
1748 when it calls us to simplify CONST operations. */
1749 if (tem
1750 && ! (GET_CODE (tem) == CONST
1751 && GET_CODE (XEXP (tem, 0)) == ncode
1752 && XEXP (XEXP (tem, 0), 0) == lhs
1753 && XEXP (XEXP (tem, 0), 1) == rhs)
1754 /* Don't allow -x + -1 -> ~x simplifications in the
1755 first pass. This allows us the chance to combine
1756 the -1 with other constants. */
1757 && ! (first
1758 && GET_CODE (tem) == NOT
1759 && XEXP (tem, 0) == rhs))
1761 lneg &= rneg;
1762 if (GET_CODE (tem) == NEG)
1763 tem = XEXP (tem, 0), lneg = !lneg;
1764 if (GET_CODE (tem) == CONST_INT && lneg)
1765 tem = neg_const_int (mode, tem), lneg = 0;
1767 ops[i].op = tem;
1768 ops[i].neg = lneg;
1769 ops[j].op = NULL_RTX;
1770 changed = 1;
1775 first = 0;
1777 while (changed);
1779 /* Pack all the operands to the lower-numbered entries. */
1780 for (i = 0, j = 0; j < n_ops; j++)
1781 if (ops[j].op)
1782 ops[i++] = ops[j];
1783 n_ops = i;
1785 /* Sort the operations based on swap_commutative_operands_p. */
1786 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1788 /* We suppressed creation of trivial CONST expressions in the
1789 combination loop to avoid recursion. Create one manually now.
1790 The combination loop should have ensured that there is exactly
1791 one CONST_INT, and the sort will have ensured that it is last
1792 in the array and that any other constant will be next-to-last. */
1794 if (n_ops > 1
1795 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1796 && CONSTANT_P (ops[n_ops - 2].op))
1798 rtx value = ops[n_ops - 1].op;
1799 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1800 value = neg_const_int (mode, value);
1801 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1802 n_ops--;
1805 /* Count the number of CONSTs that we generated. */
1806 n_consts = 0;
1807 for (i = 0; i < n_ops; i++)
1808 if (GET_CODE (ops[i].op) == CONST)
1809 n_consts++;
1811 /* Give up if we didn't reduce the number of operands we had. Make
1812 sure we count a CONST as two operands. If we have the same
1813 number of operands, but have made more CONSTs than before, this
1814 is also an improvement, so accept it. */
1815 if (!force
1816 && (n_ops + n_consts > input_ops
1817 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1818 return NULL_RTX;
1820 /* Put a non-negated operand first. If there aren't any, make all
1821 operands positive and negate the whole thing later. */
1823 negate = 0;
1824 for (i = 0; i < n_ops && ops[i].neg; i++)
1825 continue;
1826 if (i == n_ops)
1828 for (i = 0; i < n_ops; i++)
1829 ops[i].neg = 0;
1830 negate = 1;
1832 else if (i != 0)
1834 tem = ops[0].op;
1835 ops[0] = ops[i];
1836 ops[i].op = tem;
1837 ops[i].neg = 1;
1840 /* Now make the result by performing the requested operations. */
1841 result = ops[0].op;
1842 for (i = 1; i < n_ops; i++)
1843 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1844 mode, result, ops[i].op);
1846 return negate ? gen_rtx_NEG (mode, result) : result;
1849 /* Like simplify_binary_operation except used for relational operators.
1850 MODE is the mode of the operands, not that of the result. If MODE
1851 is VOIDmode, both operands must also be VOIDmode and we compare the
1852 operands in "infinite precision".
1854 If no simplification is possible, this function returns zero. Otherwise,
1855 it returns either const_true_rtx or const0_rtx. */
1857 rtx
1858 simplify_relational_operation (code, mode, op0, op1)
1859 enum rtx_code code;
1860 enum machine_mode mode;
1861 rtx op0, op1;
1863 int equal, op0lt, op0ltu, op1lt, op1ltu;
1864 rtx tem;
1865 rtx trueop0;
1866 rtx trueop1;
1868 if (mode == VOIDmode
1869 && (GET_MODE (op0) != VOIDmode
1870 || GET_MODE (op1) != VOIDmode))
1871 abort ();
1873 /* If op0 is a compare, extract the comparison arguments from it. */
1874 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1875 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1877 trueop0 = avoid_constant_pool_reference (op0);
1878 trueop1 = avoid_constant_pool_reference (op1);
1880 /* We can't simplify MODE_CC values since we don't know what the
1881 actual comparison is. */
1882 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1883 #ifdef HAVE_cc0
1884 || op0 == cc0_rtx
1885 #endif
1886 )
1887 return 0;
1889 /* Make sure the constant is second. */
1890 if (swap_commutative_operands_p (trueop0, trueop1))
1892 tem = op0, op0 = op1, op1 = tem;
1893 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1894 code = swap_condition (code);
1897 /* For integer comparisons of A and B maybe we can simplify A - B and can
1898 then simplify a comparison of that with zero. If A and B are both either
1899 a register or a CONST_INT, this can't help; testing for these cases will
1900 prevent infinite recursion here and speed things up.
1902 If CODE is an unsigned comparison, then we can never do this optimization,
1903 because it gives an incorrect result if the subtraction wraps around zero.
1904 ANSI C defines unsigned operations such that they never overflow, and
1905 thus such cases cannot be ignored. */
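/* Concretely, (ltu 0 1) is true, yet 0 - 1 wraps to the largest
   unsigned value, so comparing the difference against zero would give
   the wrong answer; hence GTU, GEU, LTU and LEU are excluded. */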
1907 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1908 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1909 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1910 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1911 && code != GTU && code != GEU && code != LTU && code != LEU)
1912 return simplify_relational_operation (signed_condition (code),
1913 mode, tem, const0_rtx);
1915 if (flag_unsafe_math_optimizations && code == ORDERED)
1916 return const_true_rtx;
1918 if (flag_unsafe_math_optimizations && code == UNORDERED)
1919 return const0_rtx;
1921 /* For modes without NaNs, if the two operands are equal, we know the
1922 result. */
1923 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1924 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1926 /* If the operands are floating-point constants, see if we can fold
1927 the result. */
1928 else if (GET_CODE (trueop0) == CONST_DOUBLE
1929 && GET_CODE (trueop1) == CONST_DOUBLE
1930 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1932 REAL_VALUE_TYPE d0, d1;
1934 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1935 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1937 /* Comparisons are unordered iff at least one of the values is NaN. */
1938 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1939 switch (code)
1941 case UNEQ:
1942 case UNLT:
1943 case UNGT:
1944 case UNLE:
1945 case UNGE:
1946 case NE:
1947 case UNORDERED:
1948 return const_true_rtx;
1949 case EQ:
1950 case LT:
1951 case GT:
1952 case LE:
1953 case GE:
1954 case LTGT:
1955 case ORDERED:
1956 return const0_rtx;
1957 default:
1958 return 0;
1961 equal = REAL_VALUES_EQUAL (d0, d1);
1962 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1963 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1966 /* Otherwise, see if the operands are both integers. */
1967 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1968 && (GET_CODE (trueop0) == CONST_DOUBLE
1969 || GET_CODE (trueop0) == CONST_INT)
1970 && (GET_CODE (trueop1) == CONST_DOUBLE
1971 || GET_CODE (trueop1) == CONST_INT))
1973 int width = GET_MODE_BITSIZE (mode);
1974 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1975 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1977 /* Get the two words comprising each integer constant. */
1978 if (GET_CODE (trueop0) == CONST_DOUBLE)
1980 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1981 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1983 else
1985 l0u = l0s = INTVAL (trueop0);
1986 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1989 if (GET_CODE (trueop1) == CONST_DOUBLE)
1991 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1992 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1994 else
1996 l1u = l1s = INTVAL (trueop1);
1997 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2000 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2001 we have to sign or zero-extend the values. */
2002 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2004 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2005 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2007 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2008 l0s |= ((HOST_WIDE_INT) (-1) << width);
2010 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2011 l1s |= ((HOST_WIDE_INT) (-1) << width);
2013 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2014 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2016 equal = (h0u == h1u && l0u == l1u);
2017 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2018 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2019 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2020 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
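/* Worked example (assuming a 32-bit HOST_WIDE_INT): comparing the DImode
   constants -1 and 1 gives h0s/l0u = 0xffffffff/0xffffffff and
   h1s/l1u = 0/1. Signed: h0s < h1s, so op0lt = 1 and (lt -1 1) folds to
   const_true_rtx. Unsigned: h1u < h0u, so op1ltu = 1 and (ltu -1 1)
   folds to const0_rtx, since -1 is the largest unsigned value. */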
2023 /* Otherwise, there are some code-specific tests we can make. */
2024 else
2026 switch (code)
2028 case EQ:
2029 /* References to the frame plus a constant or labels cannot
2030 be zero, but a SYMBOL_REF can be, due to #pragma weak. */
2031 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2032 || GET_CODE (trueop0) == LABEL_REF)
2033 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2034 /* On some machines, the ap reg can be 0 sometimes. */
2035 && op0 != arg_pointer_rtx
2036 #endif
2037 )
2038 return const0_rtx;
2039 break;
2041 case NE:
2042 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2043 || GET_CODE (trueop0) == LABEL_REF)
2044 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2045 && op0 != arg_pointer_rtx
2046 #endif
2047 )
2048 return const_true_rtx;
2049 break;
2051 case GEU:
2052 /* Unsigned values are never negative. */
2053 if (trueop1 == const0_rtx)
2054 return const_true_rtx;
2055 break;
2057 case LTU:
2058 if (trueop1 == const0_rtx)
2059 return const0_rtx;
2060 break;
2062 case LEU:
2063 /* Unsigned values are never greater than the largest
2064 unsigned value. */
2065 if (GET_CODE (trueop1) == CONST_INT
2066 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2067 && INTEGRAL_MODE_P (mode))
2068 return const_true_rtx;
2069 break;
2071 case GTU:
2072 if (GET_CODE (trueop1) == CONST_INT
2073 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2074 && INTEGRAL_MODE_P (mode))
2075 return const0_rtx;
2076 break;
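/* E.g. in QImode, where GET_MODE_MASK is 0xff: (leu:QI x (const_int 255))
   folds to const_true_rtx and (gtu:QI x (const_int 255)) to const0_rtx,
   mirroring the GEU/LTU folds against zero above. */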
2078 default:
2079 break;
2082 return 0;
2085 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2086 as appropriate. */
2087 switch (code)
2089 case EQ:
2090 case UNEQ:
2091 return equal ? const_true_rtx : const0_rtx;
2092 case NE:
2093 case LTGT:
2094 return ! equal ? const_true_rtx : const0_rtx;
2095 case LT:
2096 case UNLT:
2097 return op0lt ? const_true_rtx : const0_rtx;
2098 case GT:
2099 case UNGT:
2100 return op1lt ? const_true_rtx : const0_rtx;
2101 case LTU:
2102 return op0ltu ? const_true_rtx : const0_rtx;
2103 case GTU:
2104 return op1ltu ? const_true_rtx : const0_rtx;
2105 case LE:
2106 case UNLE:
2107 return equal || op0lt ? const_true_rtx : const0_rtx;
2108 case GE:
2109 case UNGE:
2110 return equal || op1lt ? const_true_rtx : const0_rtx;
2111 case LEU:
2112 return equal || op0ltu ? const_true_rtx : const0_rtx;
2113 case GEU:
2114 return equal || op1ltu ? const_true_rtx : const0_rtx;
2115 case ORDERED:
2116 return const_true_rtx;
2117 case UNORDERED:
2118 return const0_rtx;
2119 default:
2120 abort ();
2124 /* Simplify CODE, an operation with result mode MODE and three operands,
2125 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2126 a constant. Return 0 if no simplification is possible. */
2128 rtx
2129 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2130 enum rtx_code code;
2131 enum machine_mode mode, op0_mode;
2132 rtx op0, op1, op2;
2134 unsigned int width = GET_MODE_BITSIZE (mode);
2136 /* VOIDmode means "infinite" precision. */
2137 if (width == 0)
2138 width = HOST_BITS_PER_WIDE_INT;
2140 switch (code)
2142 case SIGN_EXTRACT:
2143 case ZERO_EXTRACT:
2144 if (GET_CODE (op0) == CONST_INT
2145 && GET_CODE (op1) == CONST_INT
2146 && GET_CODE (op2) == CONST_INT
2147 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2148 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2150 /* Extracting a bit-field from a constant */
2151 HOST_WIDE_INT val = INTVAL (op0);
2153 if (BITS_BIG_ENDIAN)
2154 val >>= (GET_MODE_BITSIZE (op0_mode)
2155 - INTVAL (op2) - INTVAL (op1));
2156 else
2157 val >>= INTVAL (op2);
2159 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2161 /* First zero-extend. */
2162 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2163 /* If desired, propagate sign bit. */
2164 if (code == SIGN_EXTRACT
2165 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2166 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2169 /* Clear the bits that don't belong in our mode,
2170 unless they and our sign bit are all one.
2171 So we get either a reasonable negative value or a reasonable
2172 unsigned value for this mode. */
2173 if (width < HOST_BITS_PER_WIDE_INT
2174 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2175 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2176 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2178 return GEN_INT (val);
2180 break;
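/* Worked example: (zero_extract:SI (const_int 0x1234) (const_int 8)
   (const_int 4)) with !BITS_BIG_ENDIAN shifts right by 4 and masks to
   8 bits, yielding (const_int 0x23). With SIGN_EXTRACT and a field
   whose top bit is set, e.g. extracting the low 8 bits of 0x80, the
   sign is propagated and the result is (const_int -128). */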
2182 case IF_THEN_ELSE:
2183 if (GET_CODE (op0) == CONST_INT)
2184 return op0 != const0_rtx ? op1 : op2;
2186 /* Convert (ne a b) ? a : b and (eq a b) ? b : a to "a". */
2187 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2188 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2189 && rtx_equal_p (XEXP (op0, 0), op1)
2190 && rtx_equal_p (XEXP (op0, 1), op2))
2191 return op1;
2192 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2193 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2194 && rtx_equal_p (XEXP (op0, 1), op1)
2195 && rtx_equal_p (XEXP (op0, 0), op2))
2196 return op2;
2197 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2199 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2200 ? GET_MODE (XEXP (op0, 1))
2201 : GET_MODE (XEXP (op0, 0)));
2202 rtx temp;
2203 if (cmp_mode == VOIDmode)
2204 cmp_mode = op0_mode;
2205 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2206 XEXP (op0, 0), XEXP (op0, 1));
2208 /* See if any simplifications were possible. */
2209 if (temp == const0_rtx)
2210 return op2;
2211 else if (temp == const1_rtx)
2212 return op1;
2213 else if (temp)
2214 op0 = temp;
2216 /* Look for happy constants in op1 and op2. */
2217 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2219 HOST_WIDE_INT t = INTVAL (op1);
2220 HOST_WIDE_INT f = INTVAL (op2);
2222 if (t == STORE_FLAG_VALUE && f == 0)
2223 code = GET_CODE (op0);
2224 else if (t == 0 && f == STORE_FLAG_VALUE)
2226 enum rtx_code tmp;
2227 tmp = reversed_comparison_code (op0, NULL_RTX);
2228 if (tmp == UNKNOWN)
2229 break;
2230 code = tmp;
2232 else
2233 break;
2235 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2238 break;
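/* Illustrative examples for the IF_THEN_ELSE cases above:
   (if_then_else (ne a b) a b) and (if_then_else (eq a b) b a) both
   reduce to "a", and when STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to
   (lt x y) itself, while the swapped constants give the reversed
   comparison (ge x y) when that reversal is known to be safe. */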
2240 default:
2241 abort ();
2244 return 0;
2247 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2248 Return 0 if no simplification is possible. */
2249 rtx
2250 simplify_subreg (outermode, op, innermode, byte)
2251 rtx op;
2252 unsigned int byte;
2253 enum machine_mode outermode, innermode;
2255 /* Little bit of sanity checking. */
2256 if (innermode == VOIDmode || outermode == VOIDmode
2257 || innermode == BLKmode || outermode == BLKmode)
2258 abort ();
2260 if (GET_MODE (op) != innermode
2261 && GET_MODE (op) != VOIDmode)
2262 abort ();
2264 if (byte % GET_MODE_SIZE (outermode)
2265 || byte >= GET_MODE_SIZE (innermode))
2266 abort ();
2268 if (outermode == innermode && !byte)
2269 return op;
2271 /* Simplify subregs of vector constants. */
2272 if (GET_CODE (op) == CONST_VECTOR)
2274 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2275 int offset = byte / elt_size;
2276 rtx elt;
2278 if (GET_MODE_INNER (innermode) == outermode)
2280 elt = CONST_VECTOR_ELT (op, offset);
2282 /* ?? We probably don't need this copy_rtx because constants
2283 can be shared. ?? */
2285 return copy_rtx (elt);
2287 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2288 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2290 return (gen_rtx_CONST_VECTOR
2291 (outermode,
2292 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2293 &CONST_VECTOR_ELT (op, offset))));
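/* E.g. (subreg:V2SI (const_vector:V4SI [a b c d]) 8): elt_size is 4,
   so offset is 2 and the result is (const_vector:V2SI [c d]), built
   from the two elements starting at index 2. */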
2295 else if (GET_MODE_CLASS (outermode) == MODE_INT
2296 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2298 /* This happens when the target register size is smaller than
2299 the vector mode, and we synthesize operations with vectors
2300 of elements that are smaller than the register size. */
2301 HOST_WIDE_INT sum = 0, high = 0;
2302 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2303 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2304 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2305 int shift = BITS_PER_UNIT * elt_size;
2307 for (; n_elts--; i += step)
2309 elt = CONST_VECTOR_ELT (op, i);
2310 if (GET_CODE (elt) == CONST_DOUBLE
2311 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2313 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2314 elt);
2315 if (! elt)
2316 return NULL_RTX;
2318 if (GET_CODE (elt) != CONST_INT)
2319 return NULL_RTX;
2320 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2321 sum = (sum << shift) + INTVAL (elt);
2323 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2324 return GEN_INT (trunc_int_for_mode (sum, outermode));
2325 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2326 return immed_double_const (high, sum, outermode);
2327 else
2328 return NULL_RTX;
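/* Worked example: folding (subreg:SI (const_vector:V4QI [1 2 3 4]) 0)
   packs the four QImode elements into one SImode integer: with
   BYTES_BIG_ENDIAN the loop visits elements 0..3 and accumulates
   0x01020304; on a little-endian target it visits 3..0 and yields
   0x04030201. */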
2330 else if (GET_MODE_CLASS (outermode) == MODE_INT
2331 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2333 enum machine_mode new_mode
2334 = int_mode_for_mode (GET_MODE_INNER (innermode));
2335 int subbyte = byte % elt_size;
2337 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2338 if (! op)
2339 return NULL_RTX;
2340 return simplify_subreg (outermode, op, new_mode, subbyte);
2342 else if (GET_MODE_CLASS (outermode) != MODE_VECTOR_INT
2343 && GET_MODE_CLASS (outermode) != MODE_VECTOR_FLOAT)
2344 /* This shouldn't happen, but let's not do anything stupid. */
2345 return NULL_RTX;
2348 /* Attempt to simplify a constant to a non-SUBREG expression. */
2349 if (CONSTANT_P (op))
2351 int offset, part;
2352 unsigned HOST_WIDE_INT val = 0;
2354 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2355 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2357 /* Construct a CONST_VECTOR from individual subregs. */
2358 enum machine_mode submode = GET_MODE_INNER (outermode);
2359 int subsize = GET_MODE_UNIT_SIZE (outermode);
2360 int i, elts = GET_MODE_NUNITS (outermode);
2361 rtvec v = rtvec_alloc (elts);
2363 for (i = 0; i < elts; i++, byte += subsize)
2365 RTVEC_ELT (v, i) = simplify_subreg (submode, op, innermode, byte);
2367 return gen_rtx_CONST_VECTOR (outermode, v);
2370 /* ??? This code is partly redundant with code below, but can handle
2371 the subregs of floats and similar corner cases.
2372 Later we should move all simplification code here and rewrite
2373 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2374 using SIMPLIFY_SUBREG. */
2375 if (subreg_lowpart_offset (outermode, innermode) == byte)
2377 rtx new = gen_lowpart_if_possible (outermode, op);
2378 if (new)
2379 return new;
2382 /* The same comment as above applies here. */
2383 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2384 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2385 && GET_MODE_CLASS (outermode) == MODE_INT)
2387 rtx new = constant_subword (op,
2388 (byte / UNITS_PER_WORD),
2389 innermode);
2390 if (new)
2391 return new;
2394 offset = byte * BITS_PER_UNIT;
2395 switch (GET_CODE (op))
2397 case CONST_DOUBLE:
2398 if (GET_MODE (op) != VOIDmode)
2399 break;
2401 /* We can't handle this case yet. */
2402 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2403 return NULL_RTX;
2405 part = offset >= HOST_BITS_PER_WIDE_INT;
2406 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2407 && BYTES_BIG_ENDIAN)
2408 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2409 && WORDS_BIG_ENDIAN))
2410 part = !part;
2411 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2412 offset %= HOST_BITS_PER_WIDE_INT;
2414 /* We've already picked the word we want from a double, so
2415 pretend this is actually an integer. */
2416 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2418 /* FALLTHROUGH */
2419 case CONST_INT:
2420 if (GET_CODE (op) == CONST_INT)
2421 val = INTVAL (op);
2423 /* We don't handle synthesizing non-integral constants yet. */
2424 if (GET_MODE_CLASS (outermode) != MODE_INT)
2425 return NULL_RTX;
2427 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2429 if (WORDS_BIG_ENDIAN)
2430 offset = (GET_MODE_BITSIZE (innermode)
2431 - GET_MODE_BITSIZE (outermode) - offset);
2432 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2433 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2434 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2435 - 2 * (offset % BITS_PER_WORD));
2438 if (offset >= HOST_BITS_PER_WIDE_INT)
2439 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2440 else
2442 val >>= offset;
2443 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2444 val = trunc_int_for_mode (val, outermode);
2445 return GEN_INT (val);
2447 default:
2448 break;
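/* Worked example (little-endian, neither BYTES_ nor WORDS_BIG_ENDIAN):
   (subreg:QI (const_int 0x1234) 1) with HImode as INNERMODE gives
   offset = 8 bits, so val = 0x1234 >> 8 and the result is
   (const_int 0x12); byte 0 yields (const_int 0x34). On a big-endian
   target the offsets are mirrored, so byte 0 selects 0x12 instead. */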
2452 /* Changing mode twice with SUBREG => just change it once,
2453 or not at all if changing back to op's starting mode. */
2454 if (GET_CODE (op) == SUBREG)
2456 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2457 int final_offset = byte + SUBREG_BYTE (op);
2458 rtx new;
2460 if (outermode == innermostmode
2461 && byte == 0 && SUBREG_BYTE (op) == 0)
2462 return SUBREG_REG (op);
2464 /* The SUBREG_BYTE represents the offset, as if the value were stored
2465 in memory. An irritating exception is the paradoxical subreg, where
2466 we define SUBREG_BYTE to be 0. On big-endian machines, this
2467 value should be negative. For a moment, undo this exception. */
2468 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2470 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2471 if (WORDS_BIG_ENDIAN)
2472 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2473 if (BYTES_BIG_ENDIAN)
2474 final_offset += difference % UNITS_PER_WORD;
2476 if (SUBREG_BYTE (op) == 0
2477 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2479 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2480 if (WORDS_BIG_ENDIAN)
2481 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2482 if (BYTES_BIG_ENDIAN)
2483 final_offset += difference % UNITS_PER_WORD;
2486 /* See whether the resulting subreg will be paradoxical. */
2487 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2489 /* In nonparadoxical subregs we can't handle negative offsets. */
2490 if (final_offset < 0)
2491 return NULL_RTX;
2492 /* Bail out in case the resulting subreg would be incorrect. */
2493 if (final_offset % GET_MODE_SIZE (outermode)
2494 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2495 return NULL_RTX;
2497 else
2499 int offset = 0;
2500 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2502 /* In a paradoxical subreg, see if we are still looking at the lower part.
2503 If so, our SUBREG_BYTE will be 0. */
2504 if (WORDS_BIG_ENDIAN)
2505 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2506 if (BYTES_BIG_ENDIAN)
2507 offset += difference % UNITS_PER_WORD;
2508 if (offset == final_offset)
2509 final_offset = 0;
2510 else
2511 return NULL_RTX;
2514 /* Recurse for further possible simplifications. */
2515 new = simplify_subreg (outermode, SUBREG_REG (op),
2516 GET_MODE (SUBREG_REG (op)),
2517 final_offset);
2518 if (new)
2519 return new;
2520 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
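/* E.g. (subreg:QI (subreg:HI (reg:SI x) 2) 1): the offsets add up to
   final_offset = 3, neither paradoxical correction applies, and the
   result is (subreg:QI (reg:SI x) 3) (folded further if x is a hard
   register, as handled below). */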
2523 /* SUBREG of a hard register => just change the register number
2524 and/or mode. If the hard register is not valid in that mode,
2525 suppress this simplification. If the hard register is the stack,
2526 frame, or argument pointer, leave this as a SUBREG. */
2528 if (REG_P (op)
2529 && (! REG_FUNCTION_VALUE_P (op)
2530 || ! rtx_equal_function_value_matters)
2531 #ifdef CLASS_CANNOT_CHANGE_MODE
2532 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2533 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2534 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2535 && (TEST_HARD_REG_BIT
2536 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2537 REGNO (op))))
2538 #endif
2539 && REGNO (op) < FIRST_PSEUDO_REGISTER
2540 && ((reload_completed && !frame_pointer_needed)
2541 || (REGNO (op) != FRAME_POINTER_REGNUM
2542 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2543 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2544 #endif
2546 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2547 && REGNO (op) != ARG_POINTER_REGNUM
2548 #endif
2549 && REGNO (op) != STACK_POINTER_REGNUM)
2551 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2552 0);
2554 /* ??? We do allow it if the current REG is not valid for
2555 its mode. This is a kludge to work around how float/complex
2556 arguments are passed on 32-bit Sparc and should be fixed. */
2557 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2558 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2560 rtx x = gen_rtx_REG (outermode, final_regno);
2562 /* Propagate the original regno. We don't have any way to specify
2563 the offset inside the original regno, so do so only for the lowpart.
2564 The information is used only by alias analysis, which cannot
2565 grok partial registers anyway. */
2567 if (subreg_lowpart_offset (outermode, innermode) == byte)
2568 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2569 return x;
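/* A hedged sketch: on a 32-bit target where (reg:DI 10) occupies hard
   registers 10 and 11, (subreg:SI (reg:DI 10) 4) resolves via
   subreg_hard_regno to (reg:SI 11) (little-endian word order assumed),
   provided HARD_REGNO_MODE_OK accepts SImode in that register. */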
2573 /* If we have a SUBREG of a register that we are replacing and we are
2574 replacing it with a MEM, make a new MEM and try replacing the
2575 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2576 or if we would be widening it. */
2578 if (GET_CODE (op) == MEM
2579 && ! mode_dependent_address_p (XEXP (op, 0))
2580 /* Allow splitting of volatile memory references in case we don't
2581 have an instruction to move the whole thing. */
2582 && (! MEM_VOLATILE_P (op)
2583 || ! have_insn_for (SET, innermode))
2584 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2585 return adjust_address_nv (op, outermode, byte);
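/* For instance, (subreg:QI (mem:SI (reg:SI p)) 3) becomes a QImode
   reference 3 bytes into the word, e.g.
   (mem:QI (plus:SI (reg:SI p) (const_int 3))), with adjust_address_nv
   doing the address arithmetic. */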
2587 /* Handle complex values represented as CONCAT
2588 of real and imaginary part. */
2589 if (GET_CODE (op) == CONCAT)
2591 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2592 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2593 unsigned int final_offset;
2594 rtx res;
2596 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2597 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2598 if (res)
2599 return res;
2600 /* We can at least simplify it by referring directly to the relevant part. */
2601 return gen_rtx_SUBREG (outermode, part, final_offset);
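/* E.g. for SCmode, where GET_MODE_UNIT_SIZE is 4:
   (subreg:SF (concat:SC r i) 0) returns the real part r and
   (subreg:SF (concat:SC r i) 4) the imaginary part i. */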
2604 return NULL_RTX;
2606 /* Make a SUBREG operation or equivalent if it folds. */
2608 rtx
2609 simplify_gen_subreg (outermode, op, innermode, byte)
2610 rtx op;
2611 unsigned int byte;
2612 enum machine_mode outermode, innermode;
2614 rtx new;
2615 /* Little bit of sanity checking. */
2616 if (innermode == VOIDmode || outermode == VOIDmode
2617 || innermode == BLKmode || outermode == BLKmode)
2618 abort ();
2620 if (GET_MODE (op) != innermode
2621 && GET_MODE (op) != VOIDmode)
2622 abort ();
2624 if (byte % GET_MODE_SIZE (outermode)
2625 || byte >= GET_MODE_SIZE (innermode))
2626 abort ();
2628 if (GET_CODE (op) == QUEUED)
2629 return NULL_RTX;
2631 new = simplify_subreg (outermode, op, innermode, byte);
2632 if (new)
2633 return new;
2635 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2636 return NULL_RTX;
2638 return gen_rtx_SUBREG (outermode, op, byte);
2640 /* Simplify X, an rtx expression.
2642 Return the simplified expression or NULL if no simplifications
2643 were possible.
2645 This is the preferred entry point into the simplification routines;
2646 however, we still allow passes to call the more specific routines.
2648 Right now GCC has three (yes, three) major bodies of RTL simplification
2649 code that need to be unified.
2651 1. fold_rtx in cse.c. This code uses various CSE specific
2652 information to aid in RTL simplification.
2654 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2655 it uses combine specific information to aid in RTL
2656 simplification.
2658 3. The routines in this file.
2661 Long term we want to only have one body of simplification code; to
2662 get to that state I recommend the following steps:
2664 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2665 which do not depend on pass-specific state into these routines.
2667 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2668 use this routine whenever possible.
2670 3. Allow for pass dependent state to be provided to these
2671 routines and add simplifications based on the pass dependent
2672 state. Remove code from cse.c & combine.c that becomes
2673 redundant/dead.
2675 It will take time, but ultimately the compiler will be easier to
2676 maintain and improve. It's totally silly that when we add a
2677 simplification it needs to be added in 4 places (3 for RTL
2678 simplification and 1 for tree simplification). */
2680 rtx
2681 simplify_rtx (x)
2682 rtx x;
2684 enum rtx_code code = GET_CODE (x);
2685 enum machine_mode mode = GET_MODE (x);
2687 switch (GET_RTX_CLASS (code))
2689 case '1':
2690 return simplify_unary_operation (code, mode,
2691 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2692 case 'c':
2693 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2695 rtx tem;
2697 tem = XEXP (x, 0);
2698 XEXP (x, 0) = XEXP (x, 1);
2699 XEXP (x, 1) = tem;
2700 return simplify_binary_operation (code, mode,
2701 XEXP (x, 0), XEXP (x, 1));
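/* E.g. simplify_rtx on (mult:SI (const_int 0) (reg:SI x)) first swaps
   the operands into canonical order (constant last) and then folds the
   result to const0_rtx via simplify_binary_operation. */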
2704 case '2':
2705 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2707 case '3':
2708 case 'b':
2709 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2710 XEXP (x, 0), XEXP (x, 1),
2711 XEXP (x, 2));
2713 case '<':
2714 return simplify_relational_operation (code,
2715 ((GET_MODE (XEXP (x, 0))
2716 != VOIDmode)
2717 ? GET_MODE (XEXP (x, 0))
2718 : GET_MODE (XEXP (x, 1))),
2719 XEXP (x, 0), XEXP (x, 1));
2720 case 'x':
2721 /* The only case we try to handle is a SUBREG. */
2722 if (code == SUBREG)
2723 return simplify_gen_subreg (mode, SUBREG_REG (x),
2724 GET_MODE (SUBREG_REG (x)),
2725 SUBREG_BYTE (x));
2726 return NULL;
2727 default:
2728 return NULL;