gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with
48 cse.c. Until then, do not change these macros without also
49 changing the copy in cse.c. */
51 #define FIXED_BASE_PLUS_P(X) \
52 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
53 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
54 || (X) == virtual_stack_vars_rtx \
55 || (X) == virtual_incoming_args_rtx \
56 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
57 && (XEXP (X, 0) == frame_pointer_rtx \
58 || XEXP (X, 0) == hard_frame_pointer_rtx \
59 || ((X) == arg_pointer_rtx \
60 && fixed_regs[ARG_POINTER_REGNUM]) \
61 || XEXP (X, 0) == virtual_stack_vars_rtx \
62 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
63 || GET_CODE (X) == ADDRESSOF)
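/* For example, (plus (reg frame_pointer) (const_int 8)) satisfies
   FIXED_BASE_PLUS_P, but the stack pointer does not; (reg sp) and
   (plus (reg sp) (const_int 8)) are only admitted by
   NONZERO_BASE_PLUS_P below.  */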
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P; however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
71 #define NONZERO_BASE_PLUS_P(X) \
72 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
73 || (X) == virtual_stack_vars_rtx \
74 || (X) == virtual_incoming_args_rtx \
75 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
76 && (XEXP (X, 0) == frame_pointer_rtx \
77 || XEXP (X, 0) == hard_frame_pointer_rtx \
78 || ((X) == arg_pointer_rtx \
79 && fixed_regs[ARG_POINTER_REGNUM]) \
80 || XEXP (X, 0) == virtual_stack_vars_rtx \
81 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
82 || (X) == stack_pointer_rtx \
83 || (X) == virtual_stack_dynamic_rtx \
84 || (X) == virtual_outgoing_args_rtx \
85 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
86 && (XEXP (X, 0) == stack_pointer_rtx \
87 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
88 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
89 || GET_CODE (X) == ADDRESSOF)
91 /* Much code operates on (low, high) pairs; the low value is an
92 unsigned wide int, the high value a signed wide int. We
93 occasionally need to sign extend from low to high as if low were a
94 signed wide int. */
95 #define HWI_SIGN_EXTEND(low) \
96 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
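/* A worked example (assuming a 64-bit HOST_WIDE_INT): the double-word
   value -2 is carried as low = 0xfffffffffffffffe with
   high = HWI_SIGN_EXTEND (low) = (HOST_WIDE_INT) -1, while +2 is
   carried as low = 2 with high = HWI_SIGN_EXTEND (low) = 0.  */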
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
105 /* Negate a CONST_INT rtx, truncating (because a conversion from a
106 maximally negative number can overflow). */
107 static rtx
108 neg_const_int (mode, i)
109 enum machine_mode mode;
110 rtx i;
112 return gen_int_mode (- INTVAL (i), mode);
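/* A sketch of the truncation the comment above warns about (assuming
   32-bit SImode): neg_const_int (SImode, GEN_INT (1)) yields
   (const_int -1), while negating the maximally negative value
   (const_int -2147483648) wraps back to itself, because gen_int_mode
   truncates the negated result to MODE.  */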
116 /* Make a binary operation by properly ordering the operands and
117 seeing if the expression folds. */
120 simplify_gen_binary (code, mode, op0, op1)
121 enum rtx_code code;
122 enum machine_mode mode;
123 rtx op0, op1;
125 rtx tem;
127 /* Put complex operands first and constants second if commutative. */
128 if (GET_RTX_CLASS (code) == 'c'
129 && swap_commutative_operands_p (op0, op1))
130 tem = op0, op0 = op1, op1 = tem;
132 /* If this simplifies, do it. */
133 tem = simplify_binary_operation (code, mode, op0, op1);
134 if (tem)
135 return tem;
137 /* Handle addition and subtraction specially. Otherwise, just form
138 the operation. */
140 if (code == PLUS || code == MINUS)
142 tem = simplify_plus_minus (code, mode, op0, op1, 1);
143 if (tem)
144 return tem;
147 return gen_rtx_fmt_ee (code, mode, op0, op1);
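/* A usage sketch (hypothetical caller, not part of this file):

     rtx x = gen_rtx_REG (SImode, 1);
     rtx y = simplify_gen_binary (PLUS, SImode, const0_rtx, x);

   Y is X itself: the constant is commuted into the second operand
   position, and the addition of zero then folds away for an
   integral mode.  */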
150 /* If X is a MEM referencing the constant pool, return the real value.
151 Otherwise return X. */
153 avoid_constant_pool_reference (x)
154 rtx x;
156 rtx c, addr;
157 enum machine_mode cmode;
159 if (GET_CODE (x) != MEM)
160 return x;
161 addr = XEXP (x, 0);
163 if (GET_CODE (addr) != SYMBOL_REF
164 || ! CONSTANT_POOL_ADDRESS_P (addr))
165 return x;
167 c = get_pool_constant (addr);
168 cmode = get_pool_mode (addr);
170 /* If we're accessing the constant in a different mode than it was
171 originally stored in, attempt to fix that up via subreg simplifications.
172 If that fails we have no choice but to return the original memory. */
173 if (cmode != GET_MODE (x))
175 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
176 return c ? c : x;
179 return c;
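/* E.g. if X is a (mem (symbol_ref ...)) whose address satisfies
   CONSTANT_POOL_ADDRESS_P and whose pool entry holds the DFmode
   constant 1.0, the CONST_DOUBLE for 1.0 is returned directly,
   after a subreg fixup if the access mode differs; any other X
   comes back untouched.  */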
182 /* Make a unary operation by first seeing if it folds and otherwise making
183 the specified operation. */
186 simplify_gen_unary (code, mode, op, op_mode)
187 enum rtx_code code;
188 enum machine_mode mode;
189 rtx op;
190 enum machine_mode op_mode;
192 rtx tem;
194 /* If this simplifies, use it. */
195 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
196 return tem;
198 return gen_rtx_fmt_e (code, mode, op);
201 /* Likewise for ternary operations. */
204 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
205 enum rtx_code code;
206 enum machine_mode mode, op0_mode;
207 rtx op0, op1, op2;
209 rtx tem;
211 /* If this simplifies, use it. */
212 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
213 op0, op1, op2)))
214 return tem;
216 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
219 /* Likewise, for relational operations.
220 CMP_MODE specifies the mode the comparison is done in.
224 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
225 enum rtx_code code;
226 enum machine_mode mode;
227 enum machine_mode cmp_mode;
228 rtx op0, op1;
230 rtx tem;
232 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
233 return tem;
235 /* For the following tests, ensure const0_rtx is op1. */
236 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
237 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
239 /* If op0 is a compare, extract the comparison arguments from it. */
240 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
241 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
243 /* If op0 is a comparison, extract the comparison arguments from it. */
244 if (code == NE && op1 == const0_rtx
245 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
246 return op0;
247 else if (code == EQ && op1 == const0_rtx)
249 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
250 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
251 if (new != UNKNOWN)
253 code = new;
254 mode = cmp_mode;
255 op1 = XEXP (op0, 1);
256 op0 = XEXP (op0, 0);
260 /* Put complex operands first and constants second. */
261 if (swap_commutative_operands_p (op0, op1))
262 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
264 return gen_rtx_fmt_ee (code, mode, op0, op1);
267 /* Replace all occurrences of OLD in X with NEW and try to simplify the
268 resulting RTX. Return a new RTX which is as simplified as possible. */
271 simplify_replace_rtx (x, old, new)
272 rtx x;
273 rtx old;
274 rtx new;
276 enum rtx_code code = GET_CODE (x);
277 enum machine_mode mode = GET_MODE (x);
279 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
280 to build a new expression substituting recursively. If we can't do
281 anything, return our input. */
283 if (x == old)
284 return new;
286 switch (GET_RTX_CLASS (code))
288 case '1':
290 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
291 rtx op = (XEXP (x, 0) == old
292 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
294 return simplify_gen_unary (code, mode, op, op_mode);
297 case '2':
298 case 'c':
299 return
300 simplify_gen_binary (code, mode,
301 simplify_replace_rtx (XEXP (x, 0), old, new),
302 simplify_replace_rtx (XEXP (x, 1), old, new));
303 case '<':
305 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
306 ? GET_MODE (XEXP (x, 0))
307 : GET_MODE (XEXP (x, 1)));
308 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
309 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
311 return
312 simplify_gen_relational (code, mode,
313 (op_mode != VOIDmode
314 ? op_mode
315 : GET_MODE (op0) != VOIDmode
316 ? GET_MODE (op0)
317 : GET_MODE (op1)),
318 op0, op1);
321 case '3':
322 case 'b':
324 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
325 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
327 return
328 simplify_gen_ternary (code, mode,
329 (op_mode != VOIDmode
330 ? op_mode
331 : GET_MODE (op0)),
332 op0,
333 simplify_replace_rtx (XEXP (x, 1), old, new),
334 simplify_replace_rtx (XEXP (x, 2), old, new));
337 case 'x':
338 /* The only case we try to handle is a SUBREG. */
339 if (code == SUBREG)
341 rtx exp;
342 exp = simplify_gen_subreg (GET_MODE (x),
343 simplify_replace_rtx (SUBREG_REG (x),
344 old, new),
345 GET_MODE (SUBREG_REG (x)),
346 SUBREG_BYTE (x));
347 if (exp)
348 x = exp;
350 return x;
352 default:
353 if (GET_CODE (x) == MEM)
354 return
355 replace_equiv_address_nv (x,
356 simplify_replace_rtx (XEXP (x, 0),
357 old, new));
359 return x;
361 return x;
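#if 0
/* A usage sketch (hypothetical register and values, not compiled):
   replacing (reg 1) with (const_int 4) inside a PLUS substitutes and
   refolds, so the result below is (const_int 12) rather than
   (plus (const_int 4) (const_int 8)).  */
static rtx
example_replace ()
{
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx sum = gen_rtx_PLUS (SImode, reg, GEN_INT (8));
  return simplify_replace_rtx (sum, reg, GEN_INT (4));
}
#endif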
364 /* Try to simplify a unary operation CODE whose output mode is to be
365 MODE with input operand OP whose mode was originally OP_MODE.
366 Return zero if no simplification can be made. */
368 simplify_unary_operation (code, mode, op, op_mode)
369 enum rtx_code code;
370 enum machine_mode mode;
371 rtx op;
372 enum machine_mode op_mode;
374 unsigned int width = GET_MODE_BITSIZE (mode);
375 rtx trueop = avoid_constant_pool_reference (op);
377 /* The order of these tests is critical so that, for example, we don't
378 check the wrong mode (input vs. output) for a conversion operation,
379 such as FIX. At some point, this should be simplified. */
381 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
382 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
384 HOST_WIDE_INT hv, lv;
385 REAL_VALUE_TYPE d;
387 if (GET_CODE (trueop) == CONST_INT)
388 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
389 else
390 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
392 REAL_VALUE_FROM_INT (d, lv, hv, mode);
393 d = real_value_truncate (mode, d);
394 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
396 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
397 && (GET_CODE (trueop) == CONST_DOUBLE
398 || GET_CODE (trueop) == CONST_INT))
400 HOST_WIDE_INT hv, lv;
401 REAL_VALUE_TYPE d;
403 if (GET_CODE (trueop) == CONST_INT)
404 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
405 else
406 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
408 if (op_mode == VOIDmode)
410 /* We don't know how to interpret negative-looking numbers in
411 this case, so don't try to fold those. */
412 if (hv < 0)
413 return 0;
415 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
417 else
418 hv = 0, lv &= GET_MODE_MASK (op_mode);
420 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
421 d = real_value_truncate (mode, d);
422 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
425 if (GET_CODE (trueop) == CONST_INT
426 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
428 HOST_WIDE_INT arg0 = INTVAL (trueop);
429 HOST_WIDE_INT val;
431 switch (code)
433 case NOT:
434 val = ~ arg0;
435 break;
437 case NEG:
438 val = - arg0;
439 break;
441 case ABS:
442 val = (arg0 >= 0 ? arg0 : - arg0);
443 break;
445 case FFS:
446 /* Don't use ffs here. Instead, get the low-order bit and then its
447 number. If arg0 is zero, this will return 0, as desired. */
448 arg0 &= GET_MODE_MASK (mode);
449 val = exact_log2 (arg0 & (- arg0)) + 1;
450 break;
452 case TRUNCATE:
453 val = arg0;
454 break;
456 case ZERO_EXTEND:
457 /* When zero-extending a CONST_INT, we need to know its
458 original mode. */
459 if (op_mode == VOIDmode)
460 abort ();
461 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
463 /* If we were really extending the mode,
464 we would have to distinguish between zero-extension
465 and sign-extension. */
466 if (width != GET_MODE_BITSIZE (op_mode))
467 abort ();
468 val = arg0;
470 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
471 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
472 else
473 return 0;
474 break;
476 case SIGN_EXTEND:
477 if (op_mode == VOIDmode)
478 op_mode = mode;
479 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
481 /* If we were really extending the mode,
482 we would have to distinguish between zero-extension
483 and sign-extension. */
484 if (width != GET_MODE_BITSIZE (op_mode))
485 abort ();
486 val = arg0;
488 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
490 val
491 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
492 if (val
493 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
494 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
496 else
497 return 0;
498 break;
500 case SQRT:
501 case FLOAT_EXTEND:
502 case FLOAT_TRUNCATE:
503 case SS_TRUNCATE:
504 case US_TRUNCATE:
505 return 0;
507 default:
508 abort ();
511 val = trunc_int_for_mode (val, mode);
513 return GEN_INT (val);
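/* A worked instance of the SIGN_EXTEND case above (hypothetical
   call): extending (const_int 255) from QImode computes
   VAL = 255 & 0xff = 255, and since bit 7 is set, subtracts
   1 << 8, giving (const_int -1) -- the byte 0xff read as signed.  */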
516 /* We can do some operations on integer CONST_DOUBLEs. Also allow
517 for a DImode operation on a CONST_INT. */
518 else if (GET_MODE (trueop) == VOIDmode
519 && width <= HOST_BITS_PER_WIDE_INT * 2
520 && (GET_CODE (trueop) == CONST_DOUBLE
521 || GET_CODE (trueop) == CONST_INT))
523 unsigned HOST_WIDE_INT l1, lv;
524 HOST_WIDE_INT h1, hv;
526 if (GET_CODE (trueop) == CONST_DOUBLE)
527 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
528 else
529 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
531 switch (code)
533 case NOT:
534 lv = ~ l1;
535 hv = ~ h1;
536 break;
538 case NEG:
539 neg_double (l1, h1, &lv, &hv);
540 break;
542 case ABS:
543 if (h1 < 0)
544 neg_double (l1, h1, &lv, &hv);
545 else
546 lv = l1, hv = h1;
547 break;
549 case FFS:
550 hv = 0;
551 if (l1 == 0)
552 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
553 else
554 lv = exact_log2 (l1 & (-l1)) + 1;
555 break;
557 case TRUNCATE:
558 /* This is just a change-of-mode, so do nothing. */
559 lv = l1, hv = h1;
560 break;
562 case ZERO_EXTEND:
563 if (op_mode == VOIDmode)
564 abort ();
566 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
567 return 0;
569 hv = 0;
570 lv = l1 & GET_MODE_MASK (op_mode);
571 break;
573 case SIGN_EXTEND:
574 if (op_mode == VOIDmode
575 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
576 return 0;
577 else
579 lv = l1 & GET_MODE_MASK (op_mode);
580 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
581 && (lv & ((HOST_WIDE_INT) 1
582 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
583 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
585 hv = HWI_SIGN_EXTEND (lv);
587 break;
589 case SQRT:
590 return 0;
592 default:
593 return 0;
596 return immed_double_const (lv, hv, mode);
599 else if (GET_CODE (trueop) == CONST_DOUBLE
600 && GET_MODE_CLASS (mode) == MODE_FLOAT)
602 REAL_VALUE_TYPE d;
603 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
605 switch (code)
607 case SQRT:
608 /* We don't attempt to optimize this. */
609 return 0;
611 case ABS: d = REAL_VALUE_ABS (d); break;
612 case NEG: d = REAL_VALUE_NEGATE (d); break;
613 case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break;
614 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
615 case FIX: d = REAL_VALUE_RNDZINT (d); break;
616 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
617 default:
618 abort ();
620 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
623 else if (GET_CODE (trueop) == CONST_DOUBLE
624 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
625 && GET_MODE_CLASS (mode) == MODE_INT
626 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
628 HOST_WIDE_INT i;
629 REAL_VALUE_TYPE d;
630 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
631 switch (code)
633 case FIX: i = REAL_VALUE_FIX (d); break;
634 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
635 default:
636 abort ();
638 return gen_int_mode (i, mode);
641 /* This was formerly used only for non-IEEE float.
642 eggert@twinsun.com says it is safe for IEEE also. */
643 else
645 enum rtx_code reversed;
646 /* There are some simplifications we can do even if the operands
647 aren't constant. */
648 switch (code)
650 case NOT:
651 /* (not (not X)) == X. */
652 if (GET_CODE (op) == NOT)
653 return XEXP (op, 0);
655 /* (not (eq X Y)) == (ne X Y), etc. */
656 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
657 && ((reversed = reversed_comparison_code (op, NULL_RTX))
658 != UNKNOWN))
659 return gen_rtx_fmt_ee (reversed,
660 op_mode, XEXP (op, 0), XEXP (op, 1));
661 break;
663 case NEG:
664 /* (neg (neg X)) == X. */
665 if (GET_CODE (op) == NEG)
666 return XEXP (op, 0);
667 break;
669 case SIGN_EXTEND:
670 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
671 becomes just the MINUS if its mode is MODE. This allows
672 folding switch statements on machines using casesi (such as
673 the VAX). */
674 if (GET_CODE (op) == TRUNCATE
675 && GET_MODE (XEXP (op, 0)) == mode
676 && GET_CODE (XEXP (op, 0)) == MINUS
677 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
678 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
679 return XEXP (op, 0);
681 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
682 if (! POINTERS_EXTEND_UNSIGNED
683 && mode == Pmode && GET_MODE (op) == ptr_mode
684 && (CONSTANT_P (op)
685 || (GET_CODE (op) == SUBREG
686 && GET_CODE (SUBREG_REG (op)) == REG
687 && REG_POINTER (SUBREG_REG (op))
688 && GET_MODE (SUBREG_REG (op)) == Pmode)))
689 return convert_memory_address (Pmode, op);
690 #endif
691 break;
693 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
694 case ZERO_EXTEND:
695 if (POINTERS_EXTEND_UNSIGNED > 0
696 && mode == Pmode && GET_MODE (op) == ptr_mode
697 && (CONSTANT_P (op)
698 || (GET_CODE (op) == SUBREG
699 && GET_CODE (SUBREG_REG (op)) == REG
700 && REG_POINTER (SUBREG_REG (op))
701 && GET_MODE (SUBREG_REG (op)) == Pmode)))
702 return convert_memory_address (Pmode, op);
703 break;
704 #endif
706 default:
707 break;
710 return 0;
714 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
715 and OP1. Return 0 if no simplification is possible.
717 Don't use this for relational operations such as EQ or LT.
718 Use simplify_relational_operation instead. */
720 simplify_binary_operation (code, mode, op0, op1)
721 enum rtx_code code;
722 enum machine_mode mode;
723 rtx op0, op1;
725 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
726 HOST_WIDE_INT val;
727 unsigned int width = GET_MODE_BITSIZE (mode);
728 rtx tem;
729 rtx trueop0 = avoid_constant_pool_reference (op0);
730 rtx trueop1 = avoid_constant_pool_reference (op1);
732 /* Relational operations don't work here. We must know the mode
733 of the operands in order to do the comparison correctly.
734 Assuming a full word can give incorrect results.
735 Consider comparing 128 with -128 in QImode. */
737 if (GET_RTX_CLASS (code) == '<')
738 abort ();
740 /* Make sure the constant is second. */
741 if (GET_RTX_CLASS (code) == 'c'
742 && swap_commutative_operands_p (trueop0, trueop1))
744 tem = op0, op0 = op1, op1 = tem;
745 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
748 if (GET_MODE_CLASS (mode) == MODE_FLOAT
749 && GET_CODE (trueop0) == CONST_DOUBLE
750 && GET_CODE (trueop1) == CONST_DOUBLE
751 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
753 REAL_VALUE_TYPE f0, f1, value;
755 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
756 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
757 f0 = real_value_truncate (mode, f0);
758 f1 = real_value_truncate (mode, f1);
760 if (code == DIV
761 && !MODE_HAS_INFINITIES (mode)
762 && REAL_VALUES_EQUAL (f1, dconst0))
763 return 0;
765 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
767 value = real_value_truncate (mode, value);
768 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
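/* E.g. (plus:DF 1.5 2.5) folds here to the CONST_DOUBLE for 4.0; the
   arithmetic goes through REAL_ARITHMETIC, so it follows the target's
   floating point format rather than the host's.  */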
771 /* We can fold some multi-word operations. */
772 if (GET_MODE_CLASS (mode) == MODE_INT
773 && width == HOST_BITS_PER_WIDE_INT * 2
774 && (GET_CODE (trueop0) == CONST_DOUBLE
775 || GET_CODE (trueop0) == CONST_INT)
776 && (GET_CODE (trueop1) == CONST_DOUBLE
777 || GET_CODE (trueop1) == CONST_INT))
779 unsigned HOST_WIDE_INT l1, l2, lv;
780 HOST_WIDE_INT h1, h2, hv;
782 if (GET_CODE (trueop0) == CONST_DOUBLE)
783 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
784 else
785 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
787 if (GET_CODE (trueop1) == CONST_DOUBLE)
788 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
789 else
790 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
792 switch (code)
794 case MINUS:
795 /* A - B == A + (-B). */
796 neg_double (l2, h2, &lv, &hv);
797 l2 = lv, h2 = hv;
799 /* ... fall through ... */
801 case PLUS:
802 add_double (l1, h1, l2, h2, &lv, &hv);
803 break;
805 case MULT:
806 mul_double (l1, h1, l2, h2, &lv, &hv);
807 break;
809 case DIV: case MOD: case UDIV: case UMOD:
810 /* We'd need to include tree.h to do this and it doesn't seem worth
811 it. */
812 return 0;
814 case AND:
815 lv = l1 & l2, hv = h1 & h2;
816 break;
818 case IOR:
819 lv = l1 | l2, hv = h1 | h2;
820 break;
822 case XOR:
823 lv = l1 ^ l2, hv = h1 ^ h2;
824 break;
826 case SMIN:
827 if (h1 < h2
828 || (h1 == h2
829 && ((unsigned HOST_WIDE_INT) l1
830 < (unsigned HOST_WIDE_INT) l2)))
831 lv = l1, hv = h1;
832 else
833 lv = l2, hv = h2;
834 break;
836 case SMAX:
837 if (h1 > h2
838 || (h1 == h2
839 && ((unsigned HOST_WIDE_INT) l1
840 > (unsigned HOST_WIDE_INT) l2)))
841 lv = l1, hv = h1;
842 else
843 lv = l2, hv = h2;
844 break;
846 case UMIN:
847 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
848 || (h1 == h2
849 && ((unsigned HOST_WIDE_INT) l1
850 < (unsigned HOST_WIDE_INT) l2)))
851 lv = l1, hv = h1;
852 else
853 lv = l2, hv = h2;
854 break;
856 case UMAX:
857 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
858 || (h1 == h2
859 && ((unsigned HOST_WIDE_INT) l1
860 > (unsigned HOST_WIDE_INT) l2)))
861 lv = l1, hv = h1;
862 else
863 lv = l2, hv = h2;
864 break;
866 case LSHIFTRT: case ASHIFTRT:
867 case ASHIFT:
868 case ROTATE: case ROTATERT:
869 #ifdef SHIFT_COUNT_TRUNCATED
870 if (SHIFT_COUNT_TRUNCATED)
871 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
872 #endif
874 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
875 return 0;
877 if (code == LSHIFTRT || code == ASHIFTRT)
878 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
879 code == ASHIFTRT);
880 else if (code == ASHIFT)
881 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
882 else if (code == ROTATE)
883 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
884 else /* code == ROTATERT */
885 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
886 break;
888 default:
889 return 0;
892 return immed_double_const (lv, hv, mode);
895 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
896 || width > HOST_BITS_PER_WIDE_INT || width == 0)
898 /* Even if we can't compute a constant result,
899 there are some cases worth simplifying. */
901 switch (code)
903 case PLUS:
904 /* Maybe simplify x + 0 to x. The two expressions are equivalent
905 when x is NaN, infinite, or finite and non-zero. They aren't
906 when x is -0 and the rounding mode is not towards -infinity,
907 since (-0) + 0 is then 0. */
908 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
909 return op0;
911 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
912 transformations are safe even for IEEE. */
913 if (GET_CODE (op0) == NEG)
914 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
915 else if (GET_CODE (op1) == NEG)
916 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
918 /* (~a) + 1 -> -a */
919 if (INTEGRAL_MODE_P (mode)
920 && GET_CODE (op0) == NOT
921 && trueop1 == const1_rtx)
922 return gen_rtx_NEG (mode, XEXP (op0, 0));
924 /* Handle both-operands-constant cases. We can only add
925 CONST_INTs to constants since the sum of relocatable symbols
926 can't be handled by most assemblers. Don't add CONST_INT
927 to CONST_INT since overflow won't be computed properly if wider
928 than HOST_BITS_PER_WIDE_INT. */
930 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
931 && GET_CODE (op1) == CONST_INT)
932 return plus_constant (op0, INTVAL (op1));
933 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
934 && GET_CODE (op0) == CONST_INT)
935 return plus_constant (op1, INTVAL (op0));
937 /* See if this is something like X * C - X or vice versa or
938 if the multiplication is written as a shift. If so, we can
939 distribute and make a new multiply, shift, or maybe just
940 have X (if C is 2 in the example above). But don't make a
941 real multiply if we didn't have one before. */
943 if (! FLOAT_MODE_P (mode))
945 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
946 rtx lhs = op0, rhs = op1;
947 int had_mult = 0;
949 if (GET_CODE (lhs) == NEG)
950 coeff0 = -1, lhs = XEXP (lhs, 0);
951 else if (GET_CODE (lhs) == MULT
952 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
954 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
955 had_mult = 1;
957 else if (GET_CODE (lhs) == ASHIFT
958 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
959 && INTVAL (XEXP (lhs, 1)) >= 0
960 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
962 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
963 lhs = XEXP (lhs, 0);
966 if (GET_CODE (rhs) == NEG)
967 coeff1 = -1, rhs = XEXP (rhs, 0);
968 else if (GET_CODE (rhs) == MULT
969 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
971 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
972 had_mult = 1;
974 else if (GET_CODE (rhs) == ASHIFT
975 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
976 && INTVAL (XEXP (rhs, 1)) >= 0
977 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
979 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
980 rhs = XEXP (rhs, 0);
983 if (rtx_equal_p (lhs, rhs))
985 tem = simplify_gen_binary (MULT, mode, lhs,
986 GEN_INT (coeff0 + coeff1));
987 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
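/* A worked example (hypothetical X): for (plus (mult X 3) X) we get
   COEFF0 = 3, COEFF1 = 1 and LHS == RHS, so the sum is rebuilt as X
   times 4 (possibly emitted as a shift).  For (plus (ashift X 2) X),
   COEFF0 = 4 but HAD_MULT stays zero, so the resulting (mult X 5) is
   rejected just above rather than introducing a new multiply.  */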
991 /* If one of the operands is a PLUS or a MINUS, see if we can
992 simplify this by the associative law.
993 Don't use the associative law for floating point.
994 The inaccuracy makes it nonassociative,
995 and subtle programs can break if operations are associated. */
997 if (INTEGRAL_MODE_P (mode)
998 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
999 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1000 || (GET_CODE (op0) == CONST
1001 && GET_CODE (XEXP (op0, 0)) == PLUS)
1002 || (GET_CODE (op1) == CONST
1003 && GET_CODE (XEXP (op1, 0)) == PLUS))
1004 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1005 return tem;
1006 break;
1008 case COMPARE:
1009 #ifdef HAVE_cc0
1010 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1011 using cc0, in which case we want to leave it as a COMPARE
1012 so we can distinguish it from a register-register-copy.
1014 In IEEE floating point, x-0 is not the same as x. */
1016 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1017 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1018 && trueop1 == CONST0_RTX (mode))
1019 return op0;
1020 #endif
1022 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1023 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1024 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1025 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1027 rtx xop00 = XEXP (op0, 0);
1028 rtx xop10 = XEXP (op1, 0);
1030 #ifdef HAVE_cc0
1031 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1032 #else
1033 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1034 && GET_MODE (xop00) == GET_MODE (xop10)
1035 && REGNO (xop00) == REGNO (xop10)
1036 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1037 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1038 #endif
1039 return xop00;
1041 break;
1043 case MINUS:
1044 /* We can't assume x-x is 0 even with non-IEEE floating point,
1045 but since it is zero except in very strange circumstances, we
1046 will treat it as zero with -funsafe-math-optimizations. */
1047 if (rtx_equal_p (trueop0, trueop1)
1048 && ! side_effects_p (op0)
1049 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1050 return CONST0_RTX (mode);
1052 /* Change subtraction from zero into negation. (0 - x) is the
1053 same as -x when x is NaN, infinite, or finite and non-zero.
1054 But if the mode has signed zeros, and does not round towards
1055 -infinity, then 0 - 0 is 0, not -0. */
1056 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1057 return gen_rtx_NEG (mode, op1);
1059 /* (-1 - a) is ~a. */
1060 if (trueop0 == constm1_rtx)
1061 return gen_rtx_NOT (mode, op1);
1063 /* Subtracting 0 has no effect unless the mode has signed zeros
1064 and supports rounding towards -infinity. In such a case,
1065 0 - 0 is -0. */
1066 if (!(HONOR_SIGNED_ZEROS (mode)
1067 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 && trueop1 == CONST0_RTX (mode))
1069 return op0;
1071 /* See if this is something like X * C - X or vice versa or
1072 if the multiplication is written as a shift. If so, we can
1073 distribute and make a new multiply, shift, or maybe just
1074 have X (if C is 2 in the example above). But don't make a
1075 real multiply if we didn't have one before. */
1077 if (! FLOAT_MODE_P (mode))
1079 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1080 rtx lhs = op0, rhs = op1;
1081 int had_mult = 0;
1083 if (GET_CODE (lhs) == NEG)
1084 coeff0 = -1, lhs = XEXP (lhs, 0);
1085 else if (GET_CODE (lhs) == MULT
1086 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1088 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1089 had_mult = 1;
1091 else if (GET_CODE (lhs) == ASHIFT
1092 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1093 && INTVAL (XEXP (lhs, 1)) >= 0
1094 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1096 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1097 lhs = XEXP (lhs, 0);
1100 if (GET_CODE (rhs) == NEG)
1101 coeff1 = -1, rhs = XEXP (rhs, 0);
1102 else if (GET_CODE (rhs) == MULT
1103 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1105 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1106 had_mult = 1;
1108 else if (GET_CODE (rhs) == ASHIFT
1109 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1110 && INTVAL (XEXP (rhs, 1)) >= 0
1111 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1113 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1114 rhs = XEXP (rhs, 0);
1117 if (rtx_equal_p (lhs, rhs))
1119 tem = simplify_gen_binary (MULT, mode, lhs,
1120 GEN_INT (coeff0 - coeff1));
1121 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1125 /* (a - (-b)) -> (a + b). True even for IEEE. */
1126 if (GET_CODE (op1) == NEG)
1127 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1129 /* If one of the operands is a PLUS or a MINUS, see if we can
1130 simplify this by the associative law.
1131 Don't use the associative law for floating point.
1132 The inaccuracy makes it nonassociative,
1133 and subtle programs can break if operations are associated. */
1135 if (INTEGRAL_MODE_P (mode)
1136 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1137 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1138 || (GET_CODE (op0) == CONST
1139 && GET_CODE (XEXP (op0, 0)) == PLUS)
1140 || (GET_CODE (op1) == CONST
1141 && GET_CODE (XEXP (op1, 0)) == PLUS))
1142 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1143 return tem;
1145 /* Don't let a relocatable value get a negative coeff. */
1146 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1147 return simplify_gen_binary (PLUS, mode,
1148 op0,
1149 neg_const_int (mode, op1));
1151 /* (x - (x & y)) -> (x & ~y) */
1152 if (GET_CODE (op1) == AND)
1154 if (rtx_equal_p (op0, XEXP (op1, 0)))
1155 return simplify_gen_binary (AND, mode, op0,
1156 gen_rtx_NOT (mode, XEXP (op1, 1)));
1157 if (rtx_equal_p (op0, XEXP (op1, 1)))
1158 return simplify_gen_binary (AND, mode, op0,
1159 gen_rtx_NOT (mode, XEXP (op1, 0)));
1161 break;
1163 case MULT:
1164 if (trueop1 == constm1_rtx)
1166 tem = simplify_unary_operation (NEG, mode, op0, mode);
1168 return tem ? tem : gen_rtx_NEG (mode, op0);
1171 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1172 x is NaN, since x * 0 is then also NaN. Nor is it valid
1173 when the mode has signed zeros, since multiplying a negative
1174 number by 0 will give -0, not 0. */
1175 if (!HONOR_NANS (mode)
1176 && !HONOR_SIGNED_ZEROS (mode)
1177 && trueop1 == CONST0_RTX (mode)
1178 && ! side_effects_p (op0))
1179 return op1;
1181 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1182 However, ANSI says we can drop signals,
1183 so we can do this anyway. */
1184 if (trueop1 == CONST1_RTX (mode))
1185 return op0;
1187 /* Convert multiply by constant power of two into shift unless
1188 we are still generating RTL. This test is a kludge. */
1189 if (GET_CODE (trueop1) == CONST_INT
1190 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1191 /* If the mode is larger than the host word size, and the
1192 uppermost bit is set, then this isn't a power of two due
1193 to implicit sign extension. */
1194 && (width <= HOST_BITS_PER_WIDE_INT
1195 || val != HOST_BITS_PER_WIDE_INT - 1)
1196 && ! rtx_equal_function_value_matters)
1197 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1199 /* x*2 is x+x and x*(-1) is -x */
1200 if (GET_CODE (trueop1) == CONST_DOUBLE
1201 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1202 && GET_MODE (op0) == mode)
1204 REAL_VALUE_TYPE d;
1205 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1207 if (REAL_VALUES_EQUAL (d, dconst2))
1208 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1210 if (REAL_VALUES_EQUAL (d, dconstm1))
1211 return gen_rtx_NEG (mode, op0);
1213 break;
1215 case IOR:
1216 if (trueop1 == const0_rtx)
1217 return op0;
1218 if (GET_CODE (trueop1) == CONST_INT
1219 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1220 == GET_MODE_MASK (mode)))
1221 return op1;
1222 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1223 return op0;
1224 /* A | (~A) -> -1 */
1225 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1226 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1227 && ! side_effects_p (op0)
1228 && GET_MODE_CLASS (mode) != MODE_CC)
1229 return constm1_rtx;
1230 break;
1232 case XOR:
1233 if (trueop1 == const0_rtx)
1234 return op0;
1235 if (GET_CODE (trueop1) == CONST_INT
1236 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1237 == GET_MODE_MASK (mode)))
1238 return gen_rtx_NOT (mode, op0);
1239 if (trueop0 == trueop1 && ! side_effects_p (op0)
1240 && GET_MODE_CLASS (mode) != MODE_CC)
1241 return const0_rtx;
1242 break;
1244 case AND:
1245 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1246 return const0_rtx;
1247 if (GET_CODE (trueop1) == CONST_INT
1248 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1249 == GET_MODE_MASK (mode)))
1250 return op0;
1251 if (trueop0 == trueop1 && ! side_effects_p (op0)
1252 && GET_MODE_CLASS (mode) != MODE_CC)
1253 return op0;
1254 /* A & (~A) -> 0 */
1255 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1256 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1257 && ! side_effects_p (op0)
1258 && GET_MODE_CLASS (mode) != MODE_CC)
1259 return const0_rtx;
1260 break;
1262 case UDIV:
1263 /* Convert divide by power of two into shift (divide by 1 handled
1264 below). */
1265 if (GET_CODE (trueop1) == CONST_INT
1266 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1267 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1269 /* ... fall through ... */
1271 case DIV:
1272 if (trueop1 == CONST1_RTX (mode))
1274 /* On some platforms DIV uses a narrower mode than its
1275 operands. */
1276 rtx x = gen_lowpart_common (mode, op0);
1277 if (x)
1278 return x;
1279 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1280 return gen_lowpart_SUBREG (mode, op0);
1281 else
1282 return op0;
1285 /* Maybe change 0 / x to 0. This transformation isn't safe for
1286 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1287 Nor is it safe for modes with signed zeros, since dividing
1288 0 by a negative number gives -0, not 0. */
1289 if (!HONOR_NANS (mode)
1290 && !HONOR_SIGNED_ZEROS (mode)
1291 && trueop0 == CONST0_RTX (mode)
1292 && ! side_effects_p (op1))
1293 return op0;
1295 /* Change division by a constant into multiplication. Only do
1296 this with -funsafe-math-optimizations. */
1297 else if (GET_CODE (trueop1) == CONST_DOUBLE
1298 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1299 && trueop1 != CONST0_RTX (mode)
1300 && flag_unsafe_math_optimizations)
1302 REAL_VALUE_TYPE d;
1303 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1305 if (! REAL_VALUES_EQUAL (d, dconst0))
1307 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1308 return gen_rtx_MULT (mode, op0,
1309 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1312 break;
1314 case UMOD:
1315 /* Handle modulus by power of two (mod with 1 handled below). */
1316 if (GET_CODE (trueop1) == CONST_INT
1317 && exact_log2 (INTVAL (trueop1)) > 0)
1318 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1320 /* ... fall through ... */
1322 case MOD:
1323 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1324 && ! side_effects_p (op0) && ! side_effects_p (op1))
1325 return const0_rtx;
1326 break;
1328 case ROTATERT:
1329 case ROTATE:
1330 /* Rotating ~0 always results in ~0. */
1331 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1332 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1333 && ! side_effects_p (op1))
1334 return op0;
1336 /* ... fall through ... */
1338 case ASHIFT:
1339 case ASHIFTRT:
1340 case LSHIFTRT:
1341 if (trueop1 == const0_rtx)
1342 return op0;
1343 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1344 return op0;
1345 break;
1347 case SMIN:
1348 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1349 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1350 && ! side_effects_p (op0))
1351 return op1;
1352 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1353 return op0;
1354 break;
1356 case SMAX:
1357 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1358 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1359 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1360 && ! side_effects_p (op0))
1361 return op1;
1362 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1363 return op0;
1364 break;
1366 case UMIN:
1367 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1368 return op1;
1369 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1370 return op0;
1371 break;
1373 case UMAX:
1374 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1375 return op1;
1376 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1377 return op0;
1378 break;
1380 case SS_PLUS:
1381 case US_PLUS:
1382 case SS_MINUS:
1383 case US_MINUS:
1384 /* ??? There are simplifications that can be done. */
1385 return 0;
1387 default:
1388 abort ();
1391 return 0;
1394 /* Get the integer argument values in two forms:
1395 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1397 arg0 = INTVAL (trueop0);
1398 arg1 = INTVAL (trueop1);
1400 if (width < HOST_BITS_PER_WIDE_INT)
1402 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1403 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1405 arg0s = arg0;
1406 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1407 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1409 arg1s = arg1;
1410 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1411 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1413 else
1415 arg0s = arg0;
1416 arg1s = arg1;
1419 /* Compute the value of the arithmetic. */
1421 switch (code)
1423 case PLUS:
1424 val = arg0s + arg1s;
1425 break;
1427 case MINUS:
1428 val = arg0s - arg1s;
1429 break;
1431 case MULT:
1432 val = arg0s * arg1s;
1433 break;
1435 case DIV:
1436 if (arg1s == 0
1437 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1438 && arg1s == -1))
1439 return 0;
1440 val = arg0s / arg1s;
1441 break;
1443 case MOD:
1444 if (arg1s == 0
1445 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1446 && arg1s == -1))
1447 return 0;
1448 val = arg0s % arg1s;
1449 break;
1451 case UDIV:
1452 if (arg1 == 0
1453 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1454 && arg1s == -1))
1455 return 0;
1456 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1457 break;
1459 case UMOD:
1460 if (arg1 == 0
1461 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1462 && arg1s == -1))
1463 return 0;
1464 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1465 break;
1467 case AND:
1468 val = arg0 & arg1;
1469 break;
1471 case IOR:
1472 val = arg0 | arg1;
1473 break;
1475 case XOR:
1476 val = arg0 ^ arg1;
1477 break;
1479 case LSHIFTRT:
1480 /* If shift count is undefined, don't fold it; let the machine do
1481 what it wants. But truncate it if the machine will do that. */
1482 if (arg1 < 0)
1483 return 0;
1485 #ifdef SHIFT_COUNT_TRUNCATED
1486 if (SHIFT_COUNT_TRUNCATED)
1487 arg1 %= width;
1488 #endif
1490 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1491 break;
1493 case ASHIFT:
1494 if (arg1 < 0)
1495 return 0;
1497 #ifdef SHIFT_COUNT_TRUNCATED
1498 if (SHIFT_COUNT_TRUNCATED)
1499 arg1 %= width;
1500 #endif
1502 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1503 break;
1505 case ASHIFTRT:
1506 if (arg1 < 0)
1507 return 0;
1509 #ifdef SHIFT_COUNT_TRUNCATED
1510 if (SHIFT_COUNT_TRUNCATED)
1511 arg1 %= width;
1512 #endif
1514 val = arg0s >> arg1;
1516 /* Bootstrap compiler may not have sign extended the right shift.
1517 Manually extend the sign to ensure bootstrap cc matches gcc. */
1518 if (arg0s < 0 && arg1 > 0)
1519 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1521 break;
1523 case ROTATERT:
1524 if (arg1 < 0)
1525 return 0;
1527 arg1 %= width;
1528 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1529 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1530 break;
1532 case ROTATE:
1533 if (arg1 < 0)
1534 return 0;
1536 arg1 %= width;
1537 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1538 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1539 break;
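      /* E.g. (a sketch) rotating the QImode value 0x81 left by one
         computes (0x81 << 1) | (0x81 >> 7) = 0x103, which
         trunc_int_for_mode below reduces to (const_int 3).  */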
1541 case COMPARE:
1542 /* Do nothing here. */
1543 return 0;
1545 case SMIN:
1546 val = arg0s <= arg1s ? arg0s : arg1s;
1547 break;
1549 case UMIN:
1550 val = ((unsigned HOST_WIDE_INT) arg0
1551 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1552 break;
1554 case SMAX:
1555 val = arg0s > arg1s ? arg0s : arg1s;
1556 break;
1558 case UMAX:
1559 val = ((unsigned HOST_WIDE_INT) arg0
1560 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1561 break;
1563 default:
1564 abort ();
1567 val = trunc_int_for_mode (val, mode);
1569 return GEN_INT (val);
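/* A constant-folding sketch (hypothetical call, 32-bit SImode on a
   64-bit host): the arguments are zero-extended into ARG0/ARG1 and
   sign-extended into ARG0S/ARG1S above, so

     simplify_binary_operation (LSHIFTRT, SImode,
                                GEN_INT (-4), GEN_INT (1))

   first masks -4 to 0xfffffffc and yields (const_int 0x7ffffffe).  */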
1572 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1573 PLUS or MINUS.
1575 Rather than test for specific cases, we do this by a brute-force method
1576 and do all possible simplifications until no more changes occur. Then
1577 we rebuild the operation.
1579 If FORCE is true, then always generate the rtx. This is used to
1580 canonicalize stuff emitted from simplify_gen_binary. Note that this
1581 can still fail if the rtx is too complex. It won't fail just because
1582 the result is not 'simpler' than the input, however. */
1584 struct simplify_plus_minus_op_data
1586 rtx op;
1587 int neg;
1590 static int
1591 simplify_plus_minus_op_data_cmp (p1, p2)
1592 const void *p1;
1593 const void *p2;
1595 const struct simplify_plus_minus_op_data *d1 = p1;
1596 const struct simplify_plus_minus_op_data *d2 = p2;
1598 return (commutative_operand_precedence (d2->op)
1599 - commutative_operand_precedence (d1->op));
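/* Since this returns precedence (d2) - precedence (d1), qsort places
   higher-precedence (more complex) operands first: e.g. a REG sorts
   before a SYMBOL_REF, which sorts before a CONST_INT.  The CONST
   recombination after the sort in simplify_plus_minus relies on the
   CONST_INT coming last.  */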
1602 static rtx
1603 simplify_plus_minus (code, mode, op0, op1, force)
1604 enum rtx_code code;
1605 enum machine_mode mode;
1606 rtx op0, op1;
1607 int force;
1609 struct simplify_plus_minus_op_data ops[8];
1610 rtx result, tem;
1611 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1612 int first, negate, changed;
1613 int i, j;
1615 memset ((char *) ops, 0, sizeof ops);
1617 /* Set up the two operands and then expand them until nothing has been
1618 changed. If we run out of room in our array, give up; this should
1619 almost never happen. */
1621 ops[0].op = op0;
1622 ops[0].neg = 0;
1623 ops[1].op = op1;
1624 ops[1].neg = (code == MINUS);
1628 changed = 0;
1630 for (i = 0; i < n_ops; i++)
1632 rtx this_op = ops[i].op;
1633 int this_neg = ops[i].neg;
1634 enum rtx_code this_code = GET_CODE (this_op);
1636 switch (this_code)
1638 case PLUS:
1639 case MINUS:
1640 if (n_ops == 7)
1641 return NULL_RTX;
1643 ops[n_ops].op = XEXP (this_op, 1);
1644 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1645 n_ops++;
1647 ops[i].op = XEXP (this_op, 0);
1648 input_ops++;
1649 changed = 1;
1650 break;
1652 case NEG:
1653 ops[i].op = XEXP (this_op, 0);
1654 ops[i].neg = ! this_neg;
1655 changed = 1;
1656 break;
1658 case CONST:
1659 if (n_ops < 7
1660 && GET_CODE (XEXP (this_op, 0)) == PLUS
1661 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1662 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1664 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1665 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1666 ops[n_ops].neg = this_neg;
1667 n_ops++;
1668 input_consts++;
1669 changed = 1;
1671 break;
1673 case NOT:
1674 /* ~a -> (-a - 1) */
1675 if (n_ops != 7)
1677 ops[n_ops].op = constm1_rtx;
1678 ops[n_ops++].neg = this_neg;
1679 ops[i].op = XEXP (this_op, 0);
1680 ops[i].neg = !this_neg;
1681 changed = 1;
1683 break;
1685 case CONST_INT:
1686 if (this_neg)
1688 ops[i].op = neg_const_int (mode, this_op);
1689 ops[i].neg = 0;
1690 changed = 1;
1692 break;
1694 default:
1695 break;
1699 while (changed);
1701 /* If we only have two operands, we can't do anything. */
1702 if (n_ops <= 2 && !force)
1703 return NULL_RTX;
1705 /* Count the number of CONSTs we didn't split above. */
1706 for (i = 0; i < n_ops; i++)
1707 if (GET_CODE (ops[i].op) == CONST)
1708 input_consts++;
1710 /* Now simplify each pair of operands until nothing changes. The first
1711 time through just simplify constants against each other. */
1713 first = 1;
1716 changed = first;
1718 for (i = 0; i < n_ops - 1; i++)
1719 for (j = i + 1; j < n_ops; j++)
1721 rtx lhs = ops[i].op, rhs = ops[j].op;
1722 int lneg = ops[i].neg, rneg = ops[j].neg;
1724 if (lhs != 0 && rhs != 0
1725 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1727 enum rtx_code ncode = PLUS;
1729 if (lneg != rneg)
1731 ncode = MINUS;
1732 if (lneg)
1733 tem = lhs, lhs = rhs, rhs = tem;
1735 else if (swap_commutative_operands_p (lhs, rhs))
1736 tem = lhs, lhs = rhs, rhs = tem;
1738 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1740 /* Reject "simplifications" that just wrap the two
1741 arguments in a CONST. Failure to do so can result
1742 in infinite recursion with simplify_binary_operation
1743 when it calls us to simplify CONST operations. */
1744 if (tem
1745 && ! (GET_CODE (tem) == CONST
1746 && GET_CODE (XEXP (tem, 0)) == ncode
1747 && XEXP (XEXP (tem, 0), 0) == lhs
1748 && XEXP (XEXP (tem, 0), 1) == rhs)
1749 /* Don't allow -x + -1 -> ~x simplifications in the
1750 first pass. This allows us the chance to combine
1751 the -1 with other constants. */
1752 && ! (first
1753 && GET_CODE (tem) == NOT
1754 && XEXP (tem, 0) == rhs))
1756 lneg &= rneg;
1757 if (GET_CODE (tem) == NEG)
1758 tem = XEXP (tem, 0), lneg = !lneg;
1759 if (GET_CODE (tem) == CONST_INT && lneg)
1760 tem = neg_const_int (mode, tem), lneg = 0;
1762 ops[i].op = tem;
1763 ops[i].neg = lneg;
1764 ops[j].op = NULL_RTX;
1765 changed = 1;
1770 first = 0;
1772 while (changed);
1774 /* Pack all the operands to the lower-numbered entries. */
1775 for (i = 0, j = 0; j < n_ops; j++)
1776 if (ops[j].op)
1777 ops[i++] = ops[j];
1778 n_ops = i;
1780 /* Sort the operations based on swap_commutative_operands_p. */
1781 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1783 /* We suppressed creation of trivial CONST expressions in the
1784 combination loop to avoid recursion. Create one manually now.
1785 The combination loop should have ensured that there is exactly
1786 one CONST_INT, and the sort will have ensured that it is last
1787 in the array and that any other constant will be next-to-last. */
1789 if (n_ops > 1
1790 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1791 && CONSTANT_P (ops[n_ops - 2].op))
1793 rtx value = ops[n_ops - 1].op;
1794 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1795 value = neg_const_int (mode, value);
1796 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1797 n_ops--;
1800 /* Count the number of CONSTs that we generated. */
1801 n_consts = 0;
1802 for (i = 0; i < n_ops; i++)
1803 if (GET_CODE (ops[i].op) == CONST)
1804 n_consts++;
1806 /* Give up if we didn't reduce the number of operands we had. Make
1807 sure we count a CONST as two operands. If we have the same
1808 number of operands, but have made more CONSTs than before, this
1809 is also an improvement, so accept it. */
1810 if (!force
1811 && (n_ops + n_consts > input_ops
1812 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1813 return NULL_RTX;
1815 /* Put a non-negated operand first. If there aren't any, make all
1816 operands positive and negate the whole thing later. */
1818 negate = 0;
1819 for (i = 0; i < n_ops && ops[i].neg; i++)
1820 continue;
1821 if (i == n_ops)
1823 for (i = 0; i < n_ops; i++)
1824 ops[i].neg = 0;
1825 negate = 1;
1827 else if (i != 0)
1829 tem = ops[0].op;
1830 ops[0] = ops[i];
1831 ops[i].op = tem;
1832 ops[i].neg = 1;
1835 /* Now make the result by performing the requested operations. */
1836 result = ops[0].op;
1837 for (i = 1; i < n_ops; i++)
1838 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1839 mode, result, ops[i].op);
1841 return negate ? gen_rtx_NEG (mode, result) : result;
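/* A worked example (hypothetical A and B): for (A + B) - B the
   expansion loop produces the entries { +A, +B, -B }, the pairwise
   pass cancels +B against -B, the resulting zero is absorbed into A
   on the next pass, and A alone is returned -- no PLUS or MINUS is
   rebuilt at all.  */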
1844 /* Like simplify_binary_operation except used for relational operators.
1845 MODE is the mode of the operands, not that of the result. If MODE
1846 is VOIDmode, both operands must also be VOIDmode and we compare the
1847 operands in "infinite precision".
1849 If no simplification is possible, this function returns zero. Otherwise,
1850 it returns either const_true_rtx or const0_rtx. */
1853 simplify_relational_operation (code, mode, op0, op1)
1854 enum rtx_code code;
1855 enum machine_mode mode;
1856 rtx op0, op1;
1858 int equal, op0lt, op0ltu, op1lt, op1ltu;
1859 rtx tem;
1860 rtx trueop0;
1861 rtx trueop1;
1863 if (mode == VOIDmode
1864 && (GET_MODE (op0) != VOIDmode
1865 || GET_MODE (op1) != VOIDmode))
1866 abort ();
1868 /* If op0 is a compare, extract the comparison arguments from it. */
1869 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1870 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1872 trueop0 = avoid_constant_pool_reference (op0);
1873 trueop1 = avoid_constant_pool_reference (op1);
1875 /* We can't simplify MODE_CC values since we don't know what the
1876 actual comparison is. */
1877 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1878 #ifdef HAVE_cc0
1879 || op0 == cc0_rtx
1880 #endif
1882 return 0;
1884 /* Make sure the constant is second. */
1885 if (swap_commutative_operands_p (trueop0, trueop1))
1887 tem = op0, op0 = op1, op1 = tem;
1888 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1889 code = swap_condition (code);
1892 /* For integer comparisons of A and B maybe we can simplify A - B and can
1893 then simplify a comparison of that with zero. If A and B are both either
1894 a register or a CONST_INT, this can't help; testing for these cases will
1895 prevent infinite recursion here and speed things up.
1897 If CODE is an unsigned comparison, then we can never do this optimization,
1898 because it gives an incorrect result if the subtraction wraps around zero.
1899 ANSI C defines unsigned operations such that they never overflow, and
1900 thus such cases cannot be ignored. */
1902 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1903 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1904 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1905 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1906 && code != GTU && code != GEU && code != LTU && code != LEU)
1907 return simplify_relational_operation (signed_condition (code),
1908 mode, tem, const0_rtx);
1910 if (flag_unsafe_math_optimizations && code == ORDERED)
1911 return const_true_rtx;
1913 if (flag_unsafe_math_optimizations && code == UNORDERED)
1914 return const0_rtx;
1916 /* For modes without NaNs, if the two operands are equal, we know the
1917 result. */
1918 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1919 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1921 /* If the operands are floating-point constants, see if we can fold
1922 the result. */
1923 else if (GET_CODE (trueop0) == CONST_DOUBLE
1924 && GET_CODE (trueop1) == CONST_DOUBLE
1925 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1927 REAL_VALUE_TYPE d0, d1;
1929 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1930 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1932 /* Comparisons are unordered iff at least one of the values is NaN. */
1933 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1934 switch (code)
1936 case UNEQ:
1937 case UNLT:
1938 case UNGT:
1939 case UNLE:
1940 case UNGE:
1941 case NE:
1942 case UNORDERED:
1943 return const_true_rtx;
1944 case EQ:
1945 case LT:
1946 case GT:
1947 case LE:
1948 case GE:
1949 case LTGT:
1950 case ORDERED:
1951 return const0_rtx;
1952 default:
1953 return 0;
1956 equal = REAL_VALUES_EQUAL (d0, d1);
1957 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1958 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1961 /* Otherwise, see if the operands are both integers. */
1962 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1963 && (GET_CODE (trueop0) == CONST_DOUBLE
1964 || GET_CODE (trueop0) == CONST_INT)
1965 && (GET_CODE (trueop1) == CONST_DOUBLE
1966 || GET_CODE (trueop1) == CONST_INT))
1968 int width = GET_MODE_BITSIZE (mode);
1969 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1970 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1972 /* Get the two words comprising each integer constant. */
1973 if (GET_CODE (trueop0) == CONST_DOUBLE)
1975 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1976 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1978 else
1980 l0u = l0s = INTVAL (trueop0);
1981 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1984 if (GET_CODE (trueop1) == CONST_DOUBLE)
1986 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1987 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1989 else
1991 l1u = l1s = INTVAL (trueop1);
1992 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1995 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1996 we have to sign or zero-extend the values. */
1997 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1999 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2000 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2002 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2003 l0s |= ((HOST_WIDE_INT) (-1) << width);
2005 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2006 l1s |= ((HOST_WIDE_INT) (-1) << width);
2008 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2009 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2011 equal = (h0u == h1u && l0u == l1u);
2012 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2013 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2014 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2015 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2018 /* Otherwise, there are some code-specific tests we can make. */
2019 else
2021 switch (code)
2023 case EQ:
2024 /* References to the frame plus a constant or labels cannot
2025 be zero, but a SYMBOL_REF can due to #pragma weak. */
2026 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2027 || GET_CODE (trueop0) == LABEL_REF)
2028 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2029 /* On some machines, the arg pointer reg can sometimes be 0.  */
2030 && op0 != arg_pointer_rtx
2031 #endif
2033 return const0_rtx;
2034 break;
2036 case NE:
2037 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2038 || GET_CODE (trueop0) == LABEL_REF)
2039 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2040 && op0 != arg_pointer_rtx
2041 #endif
2043 return const_true_rtx;
2044 break;
2046 case GEU:
2047 /* Unsigned values are never negative. */
2048 if (trueop1 == const0_rtx)
2049 return const_true_rtx;
2050 break;
2052 case LTU:
2053 if (trueop1 == const0_rtx)
2054 return const0_rtx;
2055 break;
2057 case LEU:
2058 /* Unsigned values are never greater than the largest
2059 unsigned value. */
2060 if (GET_CODE (trueop1) == CONST_INT
2061 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2062 && INTEGRAL_MODE_P (mode))
2063 return const_true_rtx;
2064 break;
2066 case GTU:
2067 if (GET_CODE (trueop1) == CONST_INT
2068 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2069 && INTEGRAL_MODE_P (mode))
2070 return const0_rtx;
2071 break;
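/* Illustration of the unsigned range tests: in QImode, (geu x 0) and
   (leu x 255) are always true, while (ltu x 0) and (gtu x 255) are
   always false, since GET_MODE_MASK (QImode) is 255.  */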
2073 default:
2074 break;
2077 return 0;
2080 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2081 as appropriate. */
2082 switch (code)
2084 case EQ:
2085 case UNEQ:
2086 return equal ? const_true_rtx : const0_rtx;
2087 case NE:
2088 case LTGT:
2089 return ! equal ? const_true_rtx : const0_rtx;
2090 case LT:
2091 case UNLT:
2092 return op0lt ? const_true_rtx : const0_rtx;
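/* Note: GT and GTU reuse OP1LT and OP1LTU rather than separate flags,
   since op0 > op1 exactly when op1 < op0.  */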
2093 case GT:
2094 case UNGT:
2095 return op1lt ? const_true_rtx : const0_rtx;
2096 case LTU:
2097 return op0ltu ? const_true_rtx : const0_rtx;
2098 case GTU:
2099 return op1ltu ? const_true_rtx : const0_rtx;
2100 case LE:
2101 case UNLE:
2102 return equal || op0lt ? const_true_rtx : const0_rtx;
2103 case GE:
2104 case UNGE:
2105 return equal || op1lt ? const_true_rtx : const0_rtx;
2106 case LEU:
2107 return equal || op0ltu ? const_true_rtx : const0_rtx;
2108 case GEU:
2109 return equal || op1ltu ? const_true_rtx : const0_rtx;
2110 case ORDERED:
2111 return const_true_rtx;
2112 case UNORDERED:
2113 return const0_rtx;
2114 default:
2115 abort ();
2119 /* Simplify CODE, an operation with result mode MODE and three operands,
2120 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2121 a constant.  Return 0 if no simplification is possible.  */
2124 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2125 enum rtx_code code;
2126 enum machine_mode mode, op0_mode;
2127 rtx op0, op1, op2;
2129 unsigned int width = GET_MODE_BITSIZE (mode);
2131 /* VOIDmode means "infinite" precision. */
2132 if (width == 0)
2133 width = HOST_BITS_PER_WIDE_INT;
2135 switch (code)
2137 case SIGN_EXTRACT:
2138 case ZERO_EXTRACT:
2139 if (GET_CODE (op0) == CONST_INT
2140 && GET_CODE (op1) == CONST_INT
2141 && GET_CODE (op2) == CONST_INT
2142 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2143 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2145 /* Extracting a bit-field from a constant */
2146 HOST_WIDE_INT val = INTVAL (op0);
2148 if (BITS_BIG_ENDIAN)
2149 val >>= (GET_MODE_BITSIZE (op0_mode)
2150 - INTVAL (op2) - INTVAL (op1));
2151 else
2152 val >>= INTVAL (op2);
2154 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2156 /* First zero-extend. */
2157 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2158 /* If desired, propagate sign bit. */
2159 if (code == SIGN_EXTRACT
2160 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2161 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2164 /* Clear the bits that don't belong in our mode,
2165 unless they and our sign bit are all one.
2166 So we get either a reasonable negative value or a reasonable
2167 unsigned value for this mode. */
2168 if (width < HOST_BITS_PER_WIDE_INT
2169 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2170 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2171 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2173 return GEN_INT (val);
2175 break;
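/* Worked example, assuming little-endian bit numbering:
   (zero_extract (const_int 0x6c) (const_int 4) (const_int 2)) shifts
   right by 2 (giving 0x1b) and masks to 4 bits, yielding 11; a
   sign_extract of the same field sees bit 3 set and yields -5.  */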
2177 case IF_THEN_ELSE:
2178 if (GET_CODE (op0) == CONST_INT)
2179 return op0 != const0_rtx ? op1 : op2;
2181 /* Convert a != b ? a : b and a == b ? b : a to "a".  */
2182 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2183 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2184 && rtx_equal_p (XEXP (op0, 0), op1)
2185 && rtx_equal_p (XEXP (op0, 1), op2))
2186 return op1;
2187 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2188 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2189 && rtx_equal_p (XEXP (op0, 1), op1)
2190 && rtx_equal_p (XEXP (op0, 0), op2))
2191 return op2;
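/* Illustration: (if_then_else (ne a b) a b) and
   (if_then_else (eq a b) b a) both reduce to plain "a", since the two
   arms agree whichever way the test goes.  */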
2192 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2194 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2195 ? GET_MODE (XEXP (op0, 1))
2196 : GET_MODE (XEXP (op0, 0)));
2197 rtx temp;
2198 if (cmp_mode == VOIDmode)
2199 cmp_mode = op0_mode;
2200 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2201 XEXP (op0, 0), XEXP (op0, 1));
2203 /* See if any simplifications were possible. */
2204 if (temp == const0_rtx)
2205 return op2;
2206 else if (temp == const1_rtx)
2207 return op1;
2208 else if (temp)
2209 op0 = temp;
2211 /* Look for happy constants in op1 and op2, i.e. STORE_FLAG_VALUE and 0.  */
2212 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2214 HOST_WIDE_INT t = INTVAL (op1);
2215 HOST_WIDE_INT f = INTVAL (op2);
2217 if (t == STORE_FLAG_VALUE && f == 0)
2218 code = GET_CODE (op0);
2219 else if (t == 0 && f == STORE_FLAG_VALUE)
2221 enum rtx_code tmp;
2222 tmp = reversed_comparison_code (op0, NULL_RTX);
2223 if (tmp == UNKNOWN)
2224 break;
2225 code = tmp;
2227 else
2228 break;
2230 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2233 break;
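/* Illustration, assuming STORE_FLAG_VALUE == 1:
   (if_then_else (lt x y) 1 0) becomes (lt x y), and
   (if_then_else (lt x y) 0 1) becomes (ge x y), provided the
   comparison is reversible.  */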
2235 default:
2236 abort ();
2239 return 0;
2242 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2243 Return 0 if no simplification is possible.  */
2245 simplify_subreg (outermode, op, innermode, byte)
2246 rtx op;
2247 unsigned int byte;
2248 enum machine_mode outermode, innermode;
2250 /* Little bit of sanity checking. */
2251 if (innermode == VOIDmode || outermode == VOIDmode
2252 || innermode == BLKmode || outermode == BLKmode)
2253 abort ();
2255 if (GET_MODE (op) != innermode
2256 && GET_MODE (op) != VOIDmode)
2257 abort ();
2259 if (byte % GET_MODE_SIZE (outermode)
2260 || byte >= GET_MODE_SIZE (innermode))
2261 abort ();
2263 if (outermode == innermode && !byte)
2264 return op;
2266 /* Attempt to simplify a constant to a non-SUBREG expression.  */
2267 if (CONSTANT_P (op))
2269 int offset, part;
2270 unsigned HOST_WIDE_INT val = 0;
2272 /* ??? This code is partly redundant with code below, but can handle
2273 the subregs of floats and similar corner cases.
2274 Later we should move all simplification code here and rewrite
2275 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2276 using SIMPLIFY_SUBREG. */
2277 if (subreg_lowpart_offset (outermode, innermode) == byte)
2279 rtx new = gen_lowpart_if_possible (outermode, op);
2280 if (new)
2281 return new;
2284 /* Similar comments to the above apply here.  */
2285 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2286 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2287 && GET_MODE_CLASS (outermode) == MODE_INT)
2289 rtx new = constant_subword (op,
2290 (byte / UNITS_PER_WORD),
2291 innermode);
2292 if (new)
2293 return new;
2296 offset = byte * BITS_PER_UNIT;
2297 switch (GET_CODE (op))
2299 case CONST_DOUBLE:
2300 if (GET_MODE (op) != VOIDmode)
2301 break;
2303 /* We can't handle this case yet. */
2304 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2305 return NULL_RTX;
2307 part = offset >= HOST_BITS_PER_WIDE_INT;
2308 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2309 && BYTES_BIG_ENDIAN)
2310 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2311 && WORDS_BIG_ENDIAN))
2312 part = !part;
2313 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2314 offset %= HOST_BITS_PER_WIDE_INT;
2316 /* We've already picked the word we want from a double, so
2317 pretend this is actually an integer. */
2318 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2320 /* FALLTHROUGH */
2321 case CONST_INT:
2322 if (GET_CODE (op) == CONST_INT)
2323 val = INTVAL (op);
2325 /* We don't handle synthesizing non-integral constants yet.  */
2326 if (GET_MODE_CLASS (outermode) != MODE_INT)
2327 return NULL_RTX;
2329 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2331 if (WORDS_BIG_ENDIAN)
2332 offset = (GET_MODE_BITSIZE (innermode)
2333 - GET_MODE_BITSIZE (outermode) - offset);
2334 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2335 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2336 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2337 - 2 * (offset % BITS_PER_WORD));
2340 if (offset >= HOST_BITS_PER_WIDE_INT)
2341 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2342 else
2344 val >>= offset;
2345 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2346 val = trunc_int_for_mode (val, outermode);
2347 return GEN_INT (val);
2349 default:
2350 break;
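/* Worked example, assuming a 64-bit host and a little-endian target:
   simplify_subreg (SImode, (const_int 0x123456789), DImode, 4)
   computes offset == 32, so val >>= 32 leaves 1 and the result is
   (const_int 1) after trunc_int_for_mode.  */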
2354 /* Changing mode twice with SUBREG => just change it once,
2355 or not at all if changing back to the starting mode.  */
2356 if (GET_CODE (op) == SUBREG)
2358 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2359 int final_offset = byte + SUBREG_BYTE (op);
2360 rtx new;
2362 if (outermode == innermostmode
2363 && byte == 0 && SUBREG_BYTE (op) == 0)
2364 return SUBREG_REG (op);
2366 /* The SUBREG_BYTE represents the offset, as if the value were stored
2367 in memory.  The irritating exception is the paradoxical subreg, where
2368 we define SUBREG_BYTE to be 0; on big-endian machines this value
2369 would otherwise be negative.  For a moment, undo this exception.  */
2370 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2372 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2373 if (WORDS_BIG_ENDIAN)
2374 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2375 if (BYTES_BIG_ENDIAN)
2376 final_offset += difference % UNITS_PER_WORD;
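/* Illustration: for a paradoxical (subreg:DI (...:SI) 0) on a
   big-endian machine with 4-byte words, difference == -4, so
   final_offset is reduced by 4, recovering the negative offset that
   the paradoxical SUBREG_BYTE of 0 hides.  */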
2378 if (SUBREG_BYTE (op) == 0
2379 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2381 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2382 if (WORDS_BIG_ENDIAN)
2383 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2384 if (BYTES_BIG_ENDIAN)
2385 final_offset += difference % UNITS_PER_WORD;
2388 /* See whether the resulting subreg will be paradoxical.  */
2389 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2391 /* In a nonparadoxical subreg we can't handle negative offsets.  */
2392 if (final_offset < 0)
2393 return NULL_RTX;
2394 /* Bail out in case the resulting subreg would be incorrect.  */
2395 if (final_offset % GET_MODE_SIZE (outermode)
2396 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2397 return NULL_RTX;
2399 else
2401 int offset = 0;
2402 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2404 /* In a paradoxical subreg, see if we are still looking at the lower
2405 part.  If so, our SUBREG_BYTE will be 0.  */
2406 if (WORDS_BIG_ENDIAN)
2407 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2408 if (BYTES_BIG_ENDIAN)
2409 offset += difference % UNITS_PER_WORD;
2410 if (offset == final_offset)
2411 final_offset = 0;
2412 else
2413 return NULL_RTX;
2416 /* Recurse for further possible simplifications.  */
2417 new = simplify_subreg (outermode, SUBREG_REG (op),
2418 GET_MODE (SUBREG_REG (op)),
2419 final_offset);
2420 if (new)
2421 return new;
2422 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
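/* Illustration: (subreg:QI (subreg:HI (reg:SI r) 0) 0) folds here to
   (subreg:QI (reg:SI r) 0), which the hard-register case below may
   reduce further to a plain QImode REG.  */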
2425 /* SUBREG of a hard register => just change the register number
2426 and/or mode. If the hard register is not valid in that mode,
2427 suppress this simplification. If the hard register is the stack,
2428 frame, or argument pointer, leave this as a SUBREG. */
2430 if (REG_P (op)
2431 && (! REG_FUNCTION_VALUE_P (op)
2432 || ! rtx_equal_function_value_matters)
2433 #ifdef CLASS_CANNOT_CHANGE_MODE
2434 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2435 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2436 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2437 && (TEST_HARD_REG_BIT
2438 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2439 REGNO (op))))
2440 #endif
2441 && REGNO (op) < FIRST_PSEUDO_REGISTER
2442 && ((reload_completed && !frame_pointer_needed)
2443 || (REGNO (op) != FRAME_POINTER_REGNUM
2444 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2445 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2446 #endif
2448 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2449 && REGNO (op) != ARG_POINTER_REGNUM
2450 #endif
2451 && REGNO (op) != STACK_POINTER_REGNUM)
2453 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2454 0);
2456 /* ??? We do allow it if the current REG is not valid for
2457 its mode. This is a kludge to work around how float/complex
2458 arguments are passed on 32-bit Sparc and should be fixed. */
2459 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2460 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2462 rtx x = gen_rtx_REG (outermode, final_regno);
2464 /* Propagate the original regno.  We don't have any way to specify
2465 the offset inside the original regno, so do so only for the lowpart.
2466 The information is used only by alias analysis, which cannot
2467 grok partial registers anyway.  */
2469 if (subreg_lowpart_offset (outermode, innermode) == byte)
2470 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2471 return x;
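/* Illustration, for a hypothetical target whose word-sized hard regs
   0 and 1 together hold a DImode value: (subreg:SI (reg:DI 0) 4) can
   become (reg:SI 1) outright, provided HARD_REGNO_MODE_OK accepts
   SImode in that register.  */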
2475 /* If we have a SUBREG of a register that we are replacing and we are
2476 replacing it with a MEM, make a new MEM and try replacing the
2477 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2478 or if we would be widening it. */
2480 if (GET_CODE (op) == MEM
2481 && ! mode_dependent_address_p (XEXP (op, 0))
2482 /* Allow splitting of volatile memory references in case we don't
2483 have an instruction to move the whole thing.  */
2484 && (! MEM_VOLATILE_P (op)
2485 || ! have_insn_for (SET, innermode))
2486 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2487 return adjust_address_nv (op, outermode, byte);
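/* Illustration: (subreg:QI (mem:SI addr) 3) becomes a QImode MEM at
   addr + 3 via adjust_address_nv, since the reference is being
   narrowed, not widened.  */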
2489 /* Handle complex values represented as a CONCAT
2490 of real and imaginary parts.  */
2491 if (GET_CODE (op) == CONCAT)
2493 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2494 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2495 unsigned int final_offset;
2496 rtx res;
2498 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2499 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2500 if (res)
2501 return res;
2502 /* We can at least simplify it by referring directly to the relevant part. */
2503 return gen_rtx_SUBREG (outermode, part, final_offset);
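/* Illustration: for an SCmode CONCAT of two SFmode parts, bytes 0-3
   select the real part and bytes 4-7 the imaginary part, so
   (subreg:SF (concat:SC re im) 4) simplifies to just "im".  */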
2506 return NULL_RTX;
2508 /* Make a SUBREG operation or equivalent if it folds. */
2511 simplify_gen_subreg (outermode, op, innermode, byte)
2512 rtx op;
2513 unsigned int byte;
2514 enum machine_mode outermode, innermode;
2516 rtx new;
2517 /* Little bit of sanity checking. */
2518 if (innermode == VOIDmode || outermode == VOIDmode
2519 || innermode == BLKmode || outermode == BLKmode)
2520 abort ();
2522 if (GET_MODE (op) != innermode
2523 && GET_MODE (op) != VOIDmode)
2524 abort ();
2526 if (byte % GET_MODE_SIZE (outermode)
2527 || byte >= GET_MODE_SIZE (innermode))
2528 abort ();
2530 if (GET_CODE (op) == QUEUED)
2531 return NULL_RTX;
2533 new = simplify_subreg (outermode, op, innermode, byte);
2534 if (new)
2535 return new;
2537 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2538 return NULL_RTX;
2540 return gen_rtx_SUBREG (outermode, op, byte);
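/* Usage sketch: simplify_gen_subreg (SImode, constm1_rtx, DImode, 0)
   folds all the way to (const_int -1), whereas an operand that resists
   simplification comes back as a plain SUBREG rtx (or NULL_RTX for the
   cases rejected above).  */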
2542 /* Simplify X, an rtx expression.
2544 Return the simplified expression or NULL if no simplifications
2545 were possible.
2547 This is the preferred entry point into the simplification routines;
2548 however, we still allow passes to call the more specific routines.
2550 Right now GCC has three (yes, three) major bodies of RTL simplification
2551 code that need to be unified.
2553 1. fold_rtx in cse.c. This code uses various CSE specific
2554 information to aid in RTL simplification.
2556 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2557 it uses combine specific information to aid in RTL
2558 simplification.
2560 3. The routines in this file.
2563 Long term we want to have only one body of simplification code; to
2564 get to that state I recommend the following steps:
2566 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2567 that do not depend on pass-specific state into these routines.
2569 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2570 use this routine whenever possible.
2572 3. Allow for pass-dependent state to be provided to these
2573 routines and add simplifications based on the pass-dependent
2574 state.  Remove code from cse.c & combine.c that becomes
2575 redundant/dead.
2577 It will take time, but ultimately the compiler will be easier to
2578 maintain and improve.  It's totally silly that when we add a
2579 simplification it needs to be added to 4 places (3 for RTL
2580 simplification and 1 for tree simplification).  */
2583 simplify_rtx (x)
2584 rtx x;
2586 enum rtx_code code = GET_CODE (x);
2587 enum machine_mode mode = GET_MODE (x);
2589 switch (GET_RTX_CLASS (code))
2591 case '1':
2592 return simplify_unary_operation (code, mode,
2593 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2594 case 'c':
2595 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2597 rtx tem;
2599 tem = XEXP (x, 0);
2600 XEXP (x, 0) = XEXP (x, 1);
2601 XEXP (x, 1) = tem;
2602 return simplify_binary_operation (code, mode,
2603 XEXP (x, 0), XEXP (x, 1));
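/* Note: (plus (const_int 2) r) is canonicalized to
   (plus r (const_int 2)) above; when the operands are already in
   canonical order, control falls through to the '2' case below.  */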
2606 case '2':
2607 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2609 case '3':
2610 case 'b':
2611 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2612 XEXP (x, 0), XEXP (x, 1),
2613 XEXP (x, 2));
2615 case '<':
2616 return simplify_relational_operation (code,
2617 ((GET_MODE (XEXP (x, 0))
2618 != VOIDmode)
2619 ? GET_MODE (XEXP (x, 0))
2620 : GET_MODE (XEXP (x, 1))),
2621 XEXP (x, 0), XEXP (x, 1));
2622 case 'x':
2623 /* The only case we try to handle is a SUBREG. */
2624 if (code == SUBREG)
2625 return simplify_gen_subreg (mode, SUBREG_REG (x),
2626 GET_MODE (SUBREG_REG (x)),
2627 SUBREG_BYTE (x));
2628 return NULL;
2629 default:
2630 return NULL;