Fix PR47707
[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
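/* Illustrative sketch, not part of the original file: how HWI_SIGN_EXTEND
   produces the high half of a (low, high) pair from a low word, treating
   the low word as signed.  Guarded out because it exists only to document
   the macro above.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  unsigned HOST_WIDE_INT low_neg = (unsigned HOST_WIDE_INT) -5;
  unsigned HOST_WIDE_INT low_pos = 7;

  /* A negative-looking low word extends to an all-ones high word...  */
  gcc_assert (HWI_SIGN_EXTEND (low_neg) == (HOST_WIDE_INT) -1);
  /* ...while a non-negative low word extends to a zero high word.  */
  gcc_assert (HWI_SIGN_EXTEND (low_pos) == (HOST_WIDE_INT) 0);
}
#endif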
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
70 return gen_int_mode (- INTVAL (i), mode);
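/* Illustrative sketch, not part of the original file: why neg_const_int
   must truncate.  Negating the most negative value of a mode overflows,
   and gen_int_mode wraps the result back into the mode.  Guarded out;
   for documentation only.  */
#if 0
static void
neg_const_int_example (void)
{
  rtx int_min = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);

  /* The common case simply negates the value.  */
  gcc_assert (INTVAL (neg_const_int (SImode, GEN_INT (5))) == -5);

  /* SImode's most negative value has no positive counterpart in 32 bits;
     gen_int_mode wraps the overflowed result back into the mode, so in
     practice the same constant comes back.  */
  gcc_assert (rtx_equal_p (neg_const_int (SImode, int_min), int_min));
}
#endif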
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
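/* Illustrative sketch, not part of the original file: what mode_signbit_p
   accepts.  For a mode narrow enough to fit in a HOST_WIDE_INT the sign
   bit arrives as a CONST_INT; for wider modes it would be a CONST_DOUBLE
   with a zero low half.  Guarded out; for documentation only.  */
#if 0
static void
mode_signbit_example (void)
{
  /* 0x80000000 is the SImode sign bit (gen_int_mode canonicalizes it
     into the usual sign-extended CONST_INT form).  */
  rtx signbit = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);

  gcc_assert (mode_signbit_p (SImode, signbit));
  gcc_assert (!mode_signbit_p (SImode, const1_rtx));
  gcc_assert (!mode_signbit_p (SImode, constm1_rtx));
}
#endif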
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
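/* Illustrative sketch, not part of the original file: simplify_gen_binary
   either folds or builds a canonically ordered rtx.  Register number 100
   below is an arbitrary pseudo chosen for the example.  Guarded out; for
   documentation only.  */
#if 0
static void
simplify_gen_binary_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx sum, zero_sum;

  /* Adding zero folds away entirely (safe for integer modes).  */
  zero_sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
  gcc_assert (zero_sum == reg);

  /* When nothing folds, the constant is still moved to the second
     operand position, giving the canonical (plus (reg) (const_int 1)).  */
  sum = simplify_gen_binary (PLUS, SImode, const1_rtx, reg);
  gcc_assert (GET_CODE (sum) == PLUS
	      && XEXP (sum, 0) == reg
	      && XEXP (sum, 1) == const1_rtx);
}
#endif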
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
132 avoid_constant_pool_reference (rtx x)
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
140 case MEM:
141 break;
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 REAL_VALUE_TYPE d;
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 return x;
156 default:
157 return x;
160 if (GET_MODE (x) == BLKmode)
161 return x;
163 addr = XEXP (x, 0);
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
197 else
198 return c;
201 return x;
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
209 delegitimize_mem_from_attrs (rtx x)
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
213 if (MEM_P (x)
214 && MEM_EXPR (x)
215 && MEM_OFFSET (x))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
272 if (MEM_P (newx))
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
301 return x;
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
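/* Illustrative sketch, not part of the original file: simplify_gen_unary
   folds constant operands and otherwise just wraps the operand.  Register
   number 101 is an arbitrary pseudo for the example.  Guarded out; for
   documentation only.  */
#if 0
static void
simplify_gen_unary_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 101);
  rtx folded, wrapped;

  /* A constant operand is folded immediately: (neg (const_int 7)) is
     just (const_int -7).  */
  folded = simplify_gen_unary (NEG, SImode, GEN_INT (7), SImode);
  gcc_assert (CONST_INT_P (folded) && INTVAL (folded) == -7);

  /* A register operand cannot be folded, so the NOT is built as-is.  */
  wrapped = simplify_gen_unary (NOT, SImode, reg, SImode);
  gcc_assert (GET_CODE (wrapped) == NOT && XEXP (wrapped, 0) == reg);
}
#endif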
320 /* Likewise for ternary operations. */
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X, if it returns non-NULL, replace X with its return value and simplify the
355 result. */
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420 case RTX_EXTRA:
421 if (code == SUBREG)
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
431 break;
433 case RTX_OBJ:
434 if (code == MEM)
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
454 break;
456 default:
457 break;
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
474 if (newvec == vec)
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
484 break;
486 case 'e':
487 if (XEXP (x, i))
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
497 break;
499 return newx;
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
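/* Illustrative sketch, not part of the original file: replacing a register
   with a constant and letting the result re-simplify.  Register number 102
   is an arbitrary pseudo for the example.  Guarded out; for documentation
   only.  */
#if 0
static void
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 102);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
  rtx y;

  /* Substituting (const_int 3) for the register gives (plus 3 4), which
     simplify_gen_binary immediately folds to (const_int 7).  */
  y = simplify_replace_rtx (x, reg, GEN_INT (3));
  gcc_assert (CONST_INT_P (y) && INTVAL (y) == 7);
}
#endif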
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
595 && GET_CODE (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult (neg A) B).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
695 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
759 break;
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
795 modes we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen.  */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
896 break;
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
923 break;
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
933 case POPCOUNT:
934 switch (GET_CODE (op))
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
950 default:
951 break;
953 break;
955 case PARITY:
956 switch (GET_CODE (op))
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
973 default:
974 break;
976 break;
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Check for a sign extension of a subreg of a promoted
1004 variable, where the promotion is sign-extended, and the
1005 target mode is the same as the variable's promotion. */
1006 if (GET_CODE (op) == SUBREG
1007 && SUBREG_PROMOTED_VAR_P (op)
1008 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1009 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1010 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1012 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1013 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1014 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1016 gcc_assert (GET_MODE_BITSIZE (mode)
1017 > GET_MODE_BITSIZE (GET_MODE (op)));
1018 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1019 GET_MODE (XEXP (op, 0)));
1022 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1023 is (sign_extend:M (subreg:O <X>)) if there is mode with
1024 GET_MODE_BITSIZE (N) - I bits.
1025 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1026 is similarly (zero_extend:M (subreg:O <X>)). */
1027 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1028 && GET_CODE (XEXP (op, 0)) == ASHIFT
1029 && CONST_INT_P (XEXP (op, 1))
1030 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1031 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1033 enum machine_mode tmode
1034 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1035 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1036 gcc_assert (GET_MODE_BITSIZE (mode)
1037 > GET_MODE_BITSIZE (GET_MODE (op)));
1038 if (tmode != BLKmode)
1040 rtx inner =
1041 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1042 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1043 ? SIGN_EXTEND : ZERO_EXTEND,
1044 mode, inner, tmode);
1048 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1049 /* As we do not know which address space the pointer is referring to,
1050 we can do this only if the target does not support different pointer
1051 or address modes depending on the address space. */
1052 if (target_default_pointer_address_modes_p ()
1053 && ! POINTERS_EXTEND_UNSIGNED
1054 && mode == Pmode && GET_MODE (op) == ptr_mode
1055 && (CONSTANT_P (op)
1056 || (GET_CODE (op) == SUBREG
1057 && REG_P (SUBREG_REG (op))
1058 && REG_POINTER (SUBREG_REG (op))
1059 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1060 return convert_memory_address (Pmode, op);
1061 #endif
1062 break;
1064 case ZERO_EXTEND:
1065 /* Check for a zero extension of a subreg of a promoted
1066 variable, where the promotion is zero-extended, and the
1067 target mode is the same as the variable's promotion. */
1068 if (GET_CODE (op) == SUBREG
1069 && SUBREG_PROMOTED_VAR_P (op)
1070 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1071 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1072 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1075 if (GET_CODE (op) == ZERO_EXTEND)
1076 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1077 GET_MODE (XEXP (op, 0)));
1079 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1080 is (zero_extend:M (subreg:O <X>)) if there is mode with
1081 GET_MODE_BITSIZE (N) - I bits. */
1082 if (GET_CODE (op) == LSHIFTRT
1083 && GET_CODE (XEXP (op, 0)) == ASHIFT
1084 && CONST_INT_P (XEXP (op, 1))
1085 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1086 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1088 enum machine_mode tmode
1089 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1090 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1091 if (tmode != BLKmode)
1093 rtx inner =
1094 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1095 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1099 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1100 /* As we do not know which address space the pointer is referring to,
1101 we can do this only if the target does not support different pointer
1102 or address modes depending on the address space. */
1103 if (target_default_pointer_address_modes_p ()
1104 && POINTERS_EXTEND_UNSIGNED > 0
1105 && mode == Pmode && GET_MODE (op) == ptr_mode
1106 && (CONSTANT_P (op)
1107 || (GET_CODE (op) == SUBREG
1108 && REG_P (SUBREG_REG (op))
1109 && REG_POINTER (SUBREG_REG (op))
1110 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1111 return convert_memory_address (Pmode, op);
1112 #endif
1113 break;
1115 default:
1116 break;
1119 return 0;
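/* Illustrative sketch, not part of the original file: a couple of the
   non-constant NOT/NEG rules above, exercised through the public
   simplify_unary_operation entry point.  Register number 103 is an
   arbitrary pseudo for the example.  Guarded out; for documentation
   only.  */
#if 0
static void
simplify_unary_operation_1_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 103);
  rtx tem;

  /* (not (not X)) simplifies back to X.  */
  tem = simplify_unary_operation (NOT, SImode,
				  gen_rtx_NOT (SImode, reg), SImode);
  gcc_assert (tem == reg);

  /* (neg (neg X)) likewise simplifies back to X.  */
  tem = simplify_unary_operation (NEG, SImode,
				  gen_rtx_NEG (SImode, reg), SImode);
  gcc_assert (tem == reg);
}
#endif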
1122 /* Try to compute the value of a unary operation CODE whose output mode is to
1123 be MODE with input operand OP whose mode was originally OP_MODE.
1124 Return zero if the value cannot be computed. */
1126 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1127 rtx op, enum machine_mode op_mode)
1129 unsigned int width = GET_MODE_BITSIZE (mode);
1131 if (code == VEC_DUPLICATE)
1133 gcc_assert (VECTOR_MODE_P (mode));
1134 if (GET_MODE (op) != VOIDmode)
1136 if (!VECTOR_MODE_P (GET_MODE (op)))
1137 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1138 else
1139 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1140 (GET_MODE (op)));
1142 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1143 || GET_CODE (op) == CONST_VECTOR)
1145 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1146 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1147 rtvec v = rtvec_alloc (n_elts);
1148 unsigned int i;
1150 if (GET_CODE (op) != CONST_VECTOR)
1151 for (i = 0; i < n_elts; i++)
1152 RTVEC_ELT (v, i) = op;
1153 else
1155 enum machine_mode inmode = GET_MODE (op);
1156 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1157 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1159 gcc_assert (in_n_elts < n_elts);
1160 gcc_assert ((n_elts % in_n_elts) == 0);
1161 for (i = 0; i < n_elts; i++)
1162 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1164 return gen_rtx_CONST_VECTOR (mode, v);
1168 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1170 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1171 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1172 enum machine_mode opmode = GET_MODE (op);
1173 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1174 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1175 rtvec v = rtvec_alloc (n_elts);
1176 unsigned int i;
1178 gcc_assert (op_n_elts == n_elts);
1179 for (i = 0; i < n_elts; i++)
1181 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1182 CONST_VECTOR_ELT (op, i),
1183 GET_MODE_INNER (opmode));
1184 if (!x)
1185 return 0;
1186 RTVEC_ELT (v, i) = x;
1188 return gen_rtx_CONST_VECTOR (mode, v);
1191 /* The order of these tests is critical so that, for example, we don't
1192 check the wrong mode (input vs. output) for a conversion operation,
1193 such as FIX. At some point, this should be simplified. */
1195 if (code == FLOAT && GET_MODE (op) == VOIDmode
1196 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1198 HOST_WIDE_INT hv, lv;
1199 REAL_VALUE_TYPE d;
1201 if (CONST_INT_P (op))
1202 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1203 else
1204 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1206 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1207 d = real_value_truncate (mode, d);
1208 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1210 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1211 && (GET_CODE (op) == CONST_DOUBLE
1212 || CONST_INT_P (op)))
1214 HOST_WIDE_INT hv, lv;
1215 REAL_VALUE_TYPE d;
1217 if (CONST_INT_P (op))
1218 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1219 else
1220 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1222 if (op_mode == VOIDmode)
1224 /* We don't know how to interpret negative-looking numbers in
1225 this case, so don't try to fold those. */
1226 if (hv < 0)
1227 return 0;
1229 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1230 ;
1231 else
1232 hv = 0, lv &= GET_MODE_MASK (op_mode);
1234 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1235 d = real_value_truncate (mode, d);
1236 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1239 if (CONST_INT_P (op)
1240 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1242 HOST_WIDE_INT arg0 = INTVAL (op);
1243 HOST_WIDE_INT val;
1245 switch (code)
1247 case NOT:
1248 val = ~ arg0;
1249 break;
1251 case NEG:
1252 val = - arg0;
1253 break;
1255 case ABS:
1256 val = (arg0 >= 0 ? arg0 : - arg0);
1257 break;
1259 case FFS:
1260 arg0 &= GET_MODE_MASK (mode);
1261 val = ffs_hwi (arg0);
1262 break;
1264 case CLZ:
1265 arg0 &= GET_MODE_MASK (mode);
1266 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1267 ;
1268 else
1269 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1270 break;
1272 case CTZ:
1273 arg0 &= GET_MODE_MASK (mode);
1274 if (arg0 == 0)
1276 /* Even if the value at zero is undefined, we have to come
1277 up with some replacement. Seems good enough. */
1278 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1279 val = GET_MODE_BITSIZE (mode);
1281 else
1282 val = ctz_hwi (arg0);
1283 break;
1285 case POPCOUNT:
1286 arg0 &= GET_MODE_MASK (mode);
1287 val = 0;
1288 while (arg0)
1289 val++, arg0 &= arg0 - 1;
1290 break;
1292 case PARITY:
1293 arg0 &= GET_MODE_MASK (mode);
1294 val = 0;
1295 while (arg0)
1296 val++, arg0 &= arg0 - 1;
1297 val &= 1;
1298 break;
1300 case BSWAP:
1302 unsigned int s;
1304 val = 0;
1305 for (s = 0; s < width; s += 8)
1307 unsigned int d = width - s - 8;
1308 unsigned HOST_WIDE_INT byte;
1309 byte = (arg0 >> s) & 0xff;
1310 val |= byte << d;
1313 break;
1315 case TRUNCATE:
1316 val = arg0;
1317 break;
1319 case ZERO_EXTEND:
1320 /* When zero-extending a CONST_INT, we need to know its
1321 original mode. */
1322 gcc_assert (op_mode != VOIDmode);
1323 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1325 /* If we were really extending the mode,
1326 we would have to distinguish between zero-extension
1327 and sign-extension. */
1328 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1329 val = arg0;
1331 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1332 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1333 << GET_MODE_BITSIZE (op_mode));
1334 else
1335 return 0;
1336 break;
1338 case SIGN_EXTEND:
1339 if (op_mode == VOIDmode)
1340 op_mode = mode;
1341 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1343 /* If we were really extending the mode,
1344 we would have to distinguish between zero-extension
1345 and sign-extension. */
1346 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1347 val = arg0;
1349 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1352 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1353 << GET_MODE_BITSIZE (op_mode));
1354 if (val & ((unsigned HOST_WIDE_INT) 1
1355 << (GET_MODE_BITSIZE (op_mode) - 1)))
1357 val -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1359 else
1360 return 0;
1361 break;
1363 case SQRT:
1364 case FLOAT_EXTEND:
1365 case FLOAT_TRUNCATE:
1366 case SS_TRUNCATE:
1367 case US_TRUNCATE:
1368 case SS_NEG:
1369 case US_NEG:
1370 case SS_ABS:
1371 return 0;
1373 default:
1374 gcc_unreachable ();
1377 return gen_int_mode (val, mode);
1380 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1381 for a DImode operation on a CONST_INT. */
1382 else if (GET_MODE (op) == VOIDmode
1383 && width <= HOST_BITS_PER_WIDE_INT * 2
1384 && (GET_CODE (op) == CONST_DOUBLE
1385 || CONST_INT_P (op)))
1387 unsigned HOST_WIDE_INT l1, lv;
1388 HOST_WIDE_INT h1, hv;
1390 if (GET_CODE (op) == CONST_DOUBLE)
1391 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1392 else
1393 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1395 switch (code)
1397 case NOT:
1398 lv = ~ l1;
1399 hv = ~ h1;
1400 break;
1402 case NEG:
1403 neg_double (l1, h1, &lv, &hv);
1404 break;
1406 case ABS:
1407 if (h1 < 0)
1408 neg_double (l1, h1, &lv, &hv);
1409 else
1410 lv = l1, hv = h1;
1411 break;
1413 case FFS:
1414 hv = 0;
1415 if (l1 != 0)
1416 lv = ffs_hwi (l1);
1417 else if (h1 != 0)
1418 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1419 else
1420 lv = 0;
1421 break;
1423 case CLZ:
1424 hv = 0;
1425 if (h1 != 0)
1426 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1427 - HOST_BITS_PER_WIDE_INT;
1428 else if (l1 != 0)
1429 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1430 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1431 lv = GET_MODE_BITSIZE (mode);
1432 break;
1434 case CTZ:
1435 hv = 0;
1436 if (l1 != 0)
1437 lv = ctz_hwi (l1);
1438 else if (h1 != 0)
1439 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1440 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1441 lv = GET_MODE_BITSIZE (mode);
1442 break;
1444 case POPCOUNT:
1445 hv = 0;
1446 lv = 0;
1447 while (l1)
1448 lv++, l1 &= l1 - 1;
1449 while (h1)
1450 lv++, h1 &= h1 - 1;
1451 break;
1453 case PARITY:
1454 hv = 0;
1455 lv = 0;
1456 while (l1)
1457 lv++, l1 &= l1 - 1;
1458 while (h1)
1459 lv++, h1 &= h1 - 1;
1460 lv &= 1;
1461 break;
1463 case BSWAP:
1465 unsigned int s;
1467 hv = 0;
1468 lv = 0;
1469 for (s = 0; s < width; s += 8)
1471 unsigned int d = width - s - 8;
1472 unsigned HOST_WIDE_INT byte;
1474 if (s < HOST_BITS_PER_WIDE_INT)
1475 byte = (l1 >> s) & 0xff;
1476 else
1477 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1479 if (d < HOST_BITS_PER_WIDE_INT)
1480 lv |= byte << d;
1481 else
1482 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1485 break;
1487 case TRUNCATE:
1488 /* This is just a change-of-mode, so do nothing. */
1489 lv = l1, hv = h1;
1490 break;
1492 case ZERO_EXTEND:
1493 gcc_assert (op_mode != VOIDmode);
1495 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1496 return 0;
1498 hv = 0;
1499 lv = l1 & GET_MODE_MASK (op_mode);
1500 break;
1502 case SIGN_EXTEND:
1503 if (op_mode == VOIDmode
1504 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1505 return 0;
1506 else
1508 lv = l1 & GET_MODE_MASK (op_mode);
1509 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1510 && (lv & ((unsigned HOST_WIDE_INT) 1
1511 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1512 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1514 hv = HWI_SIGN_EXTEND (lv);
1516 break;
1518 case SQRT:
1519 return 0;
1521 default:
1522 return 0;
1525 return immed_double_const (lv, hv, mode);
1528 else if (GET_CODE (op) == CONST_DOUBLE
1529 && SCALAR_FLOAT_MODE_P (mode))
1531 REAL_VALUE_TYPE d, t;
1532 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1534 switch (code)
1536 case SQRT:
1537 if (HONOR_SNANS (mode) && real_isnan (&d))
1538 return 0;
1539 real_sqrt (&t, mode, &d);
1540 d = t;
1541 break;
1542 case ABS:
1543 d = real_value_abs (&d);
1544 break;
1545 case NEG:
1546 d = real_value_negate (&d);
1547 break;
1548 case FLOAT_TRUNCATE:
1549 d = real_value_truncate (mode, d);
1550 break;
1551 case FLOAT_EXTEND:
1552 /* All this does is change the mode. */
1553 break;
1554 case FIX:
1555 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1556 break;
1557 case NOT:
1559 long tmp[4];
1560 int i;
1562 real_to_target (tmp, &d, GET_MODE (op));
1563 for (i = 0; i < 4; i++)
1564 tmp[i] = ~tmp[i];
1565 real_from_target (&d, tmp, mode);
1566 break;
1568 default:
1569 gcc_unreachable ();
1571 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1574 else if (GET_CODE (op) == CONST_DOUBLE
1575 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1576 && GET_MODE_CLASS (mode) == MODE_INT
1577 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1579 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1580 operators are intentionally left unspecified (to ease implementation
1581 by target backends), for consistency, this routine implements the
1582 same semantics for constant folding as used by the middle-end. */
1584 /* This was formerly used only for non-IEEE float.
1585 eggert@twinsun.com says it is safe for IEEE also. */
1586 HOST_WIDE_INT xh, xl, th, tl;
1587 REAL_VALUE_TYPE x, t;
1588 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1589 switch (code)
1591 case FIX:
1592 if (REAL_VALUE_ISNAN (x))
1593 return const0_rtx;
1595 /* Test against the signed upper bound. */
1596 if (width > HOST_BITS_PER_WIDE_INT)
1598 th = ((unsigned HOST_WIDE_INT) 1
1599 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1600 tl = -1;
1602 else
1604 th = 0;
1605 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1607 real_from_integer (&t, VOIDmode, tl, th, 0);
1608 if (REAL_VALUES_LESS (t, x))
1610 xh = th;
1611 xl = tl;
1612 break;
1615 /* Test against the signed lower bound. */
1616 if (width > HOST_BITS_PER_WIDE_INT)
1618 th = (unsigned HOST_WIDE_INT) (-1)
1619 << (width - HOST_BITS_PER_WIDE_INT - 1);
1620 tl = 0;
1622 else
1624 th = -1;
1625 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1627 real_from_integer (&t, VOIDmode, tl, th, 0);
1628 if (REAL_VALUES_LESS (x, t))
1630 xh = th;
1631 xl = tl;
1632 break;
1634 REAL_VALUE_TO_INT (&xl, &xh, x);
1635 break;
1637 case UNSIGNED_FIX:
1638 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1639 return const0_rtx;
1641 /* Test against the unsigned upper bound. */
1642 if (width == 2*HOST_BITS_PER_WIDE_INT)
1644 th = -1;
1645 tl = -1;
1647 else if (width >= HOST_BITS_PER_WIDE_INT)
1649 th = ((unsigned HOST_WIDE_INT) 1
1650 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1651 tl = -1;
1653 else
1655 th = 0;
1656 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1658 real_from_integer (&t, VOIDmode, tl, th, 1);
1659 if (REAL_VALUES_LESS (t, x))
1661 xh = th;
1662 xl = tl;
1663 break;
1666 REAL_VALUE_TO_INT (&xl, &xh, x);
1667 break;
1669 default:
1670 gcc_unreachable ();
1672 return immed_double_const (xl, xh, mode);
1675 return NULL_RTX;
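/* Illustrative sketch, not part of the original file: constant folding of
   a few unary codes on CONST_INT operands, as handled by the CONST_INT
   branch above.  Guarded out; for documentation only.  */
#if 0
static void
simplify_const_unary_example (void)
{
  rtx tem;

  /* (neg (const_int 9)) folds to (const_int -9).  */
  tem = simplify_const_unary_operation (NEG, SImode, GEN_INT (9), SImode);
  gcc_assert (CONST_INT_P (tem) && INTVAL (tem) == -9);

  /* (popcount (const_int 0xff)) folds to (const_int 8).  */
  tem = simplify_const_unary_operation (POPCOUNT, SImode,
					GEN_INT (0xff), SImode);
  gcc_assert (CONST_INT_P (tem) && INTVAL (tem) == 8);

  /* (zero_extend:SI (const_int -1)) from QImode folds to 0xff.  */
  tem = simplify_const_unary_operation (ZERO_EXTEND, SImode,
					constm1_rtx, QImode);
  gcc_assert (CONST_INT_P (tem) && INTVAL (tem) == 0xff);
}
#endif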
1678 /* Subroutine of simplify_binary_operation to simplify a commutative,
1679 associative binary operation CODE with result mode MODE, operating
1680 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1681 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1682 canonicalization is possible. */
1684 static rtx
1685 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1686 rtx op0, rtx op1)
1688 rtx tem;
1690 /* Linearize the operator to the left. */
1691 if (GET_CODE (op1) == code)
1693 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1694 if (GET_CODE (op0) == code)
1696 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1697 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1700 /* "a op (b op c)" becomes "(b op c) op a". */
1701 if (! swap_commutative_operands_p (op1, op0))
1702 return simplify_gen_binary (code, mode, op1, op0);
1704 tem = op0;
1705 op0 = op1;
1706 op1 = tem;
1709 if (GET_CODE (op0) == code)
1711 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1712 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1714 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1715 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1718 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1719 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1720 if (tem != 0)
1721 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1723 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1724 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1725 if (tem != 0)
1726 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1729 return 0;
1733 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1734 and OP1. Return 0 if no simplification is possible.
1736 Don't use this for relational operations such as EQ or LT.
1737 Use simplify_relational_operation instead. */
1739 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1740 rtx op0, rtx op1)
1742 rtx trueop0, trueop1;
1743 rtx tem;
1745 /* Relational operations don't work here. We must know the mode
1746 of the operands in order to do the comparison correctly.
1747 Assuming a full word can give incorrect results.
1748 Consider comparing 128 with -128 in QImode. */
1749 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1750 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1752 /* Make sure the constant is second. */
1753 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1754 && swap_commutative_operands_p (op0, op1))
1756 tem = op0, op0 = op1, op1 = tem;
1759 trueop0 = avoid_constant_pool_reference (op0);
1760 trueop1 = avoid_constant_pool_reference (op1);
1762 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1763 if (tem)
1764 return tem;
1765 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
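/* Illustrative sketch, not part of the original file: the public binary
   entry point both folds constant operands and applies the non-constant
   rules of simplify_binary_operation_1.  Register number 104 is an
   arbitrary pseudo for the example.  Guarded out; for documentation
   only.  */
#if 0
static void
simplify_binary_operation_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 104);
  rtx tem;

  /* Two CONST_INT operands are folded outright: 2 + 3 = 5.  */
  tem = simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  gcc_assert (CONST_INT_P (tem) && INTVAL (tem) == 5);

  /* (minus X X) becomes 0 for integer modes when X has no side effects.  */
  tem = simplify_binary_operation (MINUS, SImode, reg, reg);
  gcc_assert (tem == const0_rtx);

  /* When nothing applies, NULL is returned and the caller must build
     the rtx itself (as simplify_gen_binary above does).  */
  tem = simplify_binary_operation (UDIV, SImode, reg, GEN_INT (3));
  gcc_assert (tem == NULL_RTX);
}
#endif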
1768 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1769 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1770 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1771 actual constants. */
1773 static rtx
1774 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1775 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1777 rtx tem, reversed, opleft, opright;
1778 HOST_WIDE_INT val;
1779 unsigned int width = GET_MODE_BITSIZE (mode);
1781 /* Even if we can't compute a constant result,
1782 there are some cases worth simplifying. */
1784 switch (code)
1786 case PLUS:
1787 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1788 when x is NaN, infinite, or finite and nonzero. They aren't
1789 when x is -0 and the rounding mode is not towards -infinity,
1790 since (-0) + 0 is then 0. */
1791 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1792 return op0;
1794 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1795 transformations are safe even for IEEE. */
1796 if (GET_CODE (op0) == NEG)
1797 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1798 else if (GET_CODE (op1) == NEG)
1799 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1801 /* (~a) + 1 -> -a */
1802 if (INTEGRAL_MODE_P (mode)
1803 && GET_CODE (op0) == NOT
1804 && trueop1 == const1_rtx)
1805 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1807 /* Handle both-operands-constant cases. We can only add
1808 CONST_INTs to constants since the sum of relocatable symbols
1809 can't be handled by most assemblers. Don't add CONST_INT
1810 to CONST_INT since overflow won't be computed properly if wider
1811 than HOST_BITS_PER_WIDE_INT. */
1813 if ((GET_CODE (op0) == CONST
1814 || GET_CODE (op0) == SYMBOL_REF
1815 || GET_CODE (op0) == LABEL_REF)
1816 && CONST_INT_P (op1))
1817 return plus_constant (op0, INTVAL (op1));
1818 else if ((GET_CODE (op1) == CONST
1819 || GET_CODE (op1) == SYMBOL_REF
1820 || GET_CODE (op1) == LABEL_REF)
1821 && CONST_INT_P (op0))
1822 return plus_constant (op1, INTVAL (op0));
1824 /* See if this is something like X * C - X or vice versa or
1825 if the multiplication is written as a shift. If so, we can
1826 distribute and make a new multiply, shift, or maybe just
1827 have X (if C is 2 in the example above). But don't make
1828 something more expensive than we had before. */
1830 if (SCALAR_INT_MODE_P (mode))
1832 double_int coeff0, coeff1;
1833 rtx lhs = op0, rhs = op1;
1835 coeff0 = double_int_one;
1836 coeff1 = double_int_one;
1838 if (GET_CODE (lhs) == NEG)
1840 coeff0 = double_int_minus_one;
1841 lhs = XEXP (lhs, 0);
1843 else if (GET_CODE (lhs) == MULT
1844 && CONST_INT_P (XEXP (lhs, 1)))
1846 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1847 lhs = XEXP (lhs, 0);
1849 else if (GET_CODE (lhs) == ASHIFT
1850 && CONST_INT_P (XEXP (lhs, 1))
1851 && INTVAL (XEXP (lhs, 1)) >= 0
1852 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1854 coeff0 = double_int_setbit (double_int_zero,
1855 INTVAL (XEXP (lhs, 1)));
1856 lhs = XEXP (lhs, 0);
1859 if (GET_CODE (rhs) == NEG)
1861 coeff1 = double_int_minus_one;
1862 rhs = XEXP (rhs, 0);
1864 else if (GET_CODE (rhs) == MULT
1865 && CONST_INT_P (XEXP (rhs, 1)))
1867 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1868 rhs = XEXP (rhs, 0);
1870 else if (GET_CODE (rhs) == ASHIFT
1871 && CONST_INT_P (XEXP (rhs, 1))
1872 && INTVAL (XEXP (rhs, 1)) >= 0
1873 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1875 coeff1 = double_int_setbit (double_int_zero,
1876 INTVAL (XEXP (rhs, 1)));
1877 rhs = XEXP (rhs, 0);
1880 if (rtx_equal_p (lhs, rhs))
1882 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1883 rtx coeff;
1884 double_int val;
1885 bool speed = optimize_function_for_speed_p (cfun);
1887 val = double_int_add (coeff0, coeff1);
1888 coeff = immed_double_int_const (val, mode);
1890 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1891 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1892 ? tem : 0;
1896 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1897 if ((CONST_INT_P (op1)
1898 || GET_CODE (op1) == CONST_DOUBLE)
1899 && GET_CODE (op0) == XOR
1900 && (CONST_INT_P (XEXP (op0, 1))
1901 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1902 && mode_signbit_p (mode, op1))
1903 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1904 simplify_gen_binary (XOR, mode, op1,
1905 XEXP (op0, 1)));
1907 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1908 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1909 && GET_CODE (op0) == MULT
1910 && GET_CODE (XEXP (op0, 0)) == NEG)
1912 rtx in1, in2;
1914 in1 = XEXP (XEXP (op0, 0), 0);
1915 in2 = XEXP (op0, 1);
1916 return simplify_gen_binary (MINUS, mode, op1,
1917 simplify_gen_binary (MULT, mode,
1918 in1, in2));
1921 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1922 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1923 is 1. */
1924 if (COMPARISON_P (op0)
1925 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1926 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1927 && (reversed = reversed_comparison (op0, mode)))
1928 return
1929 simplify_gen_unary (NEG, mode, reversed, mode);
1931 /* If one of the operands is a PLUS or a MINUS, see if we can
1932 simplify this by the associative law.
1933 Don't use the associative law for floating point.
1934 The inaccuracy makes it nonassociative,
1935 and subtle programs can break if operations are associated. */
1937 if (INTEGRAL_MODE_P (mode)
1938 && (plus_minus_operand_p (op0)
1939 || plus_minus_operand_p (op1))
1940 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1941 return tem;
1943 /* Reassociate floating point addition only when the user
1944 specifies associative math operations. */
1945 if (FLOAT_MODE_P (mode)
1946 && flag_associative_math)
1948 tem = simplify_associative_operation (code, mode, op0, op1);
1949 if (tem)
1950 return tem;
1952 break;
1954 case COMPARE:
1955 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1956 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1957 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1958 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1960 rtx xop00 = XEXP (op0, 0);
1961 rtx xop10 = XEXP (op1, 0);
1963 #ifdef HAVE_cc0
1964 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1965 #else
1966 if (REG_P (xop00) && REG_P (xop10)
1967 && GET_MODE (xop00) == GET_MODE (xop10)
1968 && REGNO (xop00) == REGNO (xop10)
1969 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1970 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1971 #endif
1972 return xop00;
1974 break;
1976 case MINUS:
1977 /* We can't assume x-x is 0 even with non-IEEE floating point,
1978 but since it is zero except in very strange circumstances, we
1979 will treat it as zero with -ffinite-math-only. */
1980 if (rtx_equal_p (trueop0, trueop1)
1981 && ! side_effects_p (op0)
1982 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1983 return CONST0_RTX (mode);
1985 /* Change subtraction from zero into negation. (0 - x) is the
1986 same as -x when x is NaN, infinite, or finite and nonzero.
1987 But if the mode has signed zeros, and does not round towards
1988 -infinity, then 0 - 0 is 0, not -0. */
1989 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1990 return simplify_gen_unary (NEG, mode, op1, mode);
1992 /* (-1 - a) is ~a. */
1993 if (trueop0 == constm1_rtx)
1994 return simplify_gen_unary (NOT, mode, op1, mode);
1996 /* Subtracting 0 has no effect unless the mode has signed zeros
1997 and supports rounding towards -infinity. In such a case,
1998 0 - 0 is -0. */
1999 if (!(HONOR_SIGNED_ZEROS (mode)
2000 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2001 && trueop1 == CONST0_RTX (mode))
2002 return op0;
2004 /* See if this is something like X * C - X or vice versa or
2005 if the multiplication is written as a shift. If so, we can
2006 distribute and make a new multiply, shift, or maybe just
2007 have X (if C is 2 in the example above). But don't make
2008 something more expensive than we had before. */
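      /* For example, (minus (mult X 2) X) collects the coefficients 2 and
         -1; their sum is 1, so the whole expression folds to X, provided
         that is no more costly than the original form.  */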
2010 if (SCALAR_INT_MODE_P (mode))
2012 double_int coeff0, negcoeff1;
2013 rtx lhs = op0, rhs = op1;
2015 coeff0 = double_int_one;
2016 negcoeff1 = double_int_minus_one;
2018 if (GET_CODE (lhs) == NEG)
2020 coeff0 = double_int_minus_one;
2021 lhs = XEXP (lhs, 0);
2023 else if (GET_CODE (lhs) == MULT
2024 && CONST_INT_P (XEXP (lhs, 1)))
2026 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2027 lhs = XEXP (lhs, 0);
2029 else if (GET_CODE (lhs) == ASHIFT
2030 && CONST_INT_P (XEXP (lhs, 1))
2031 && INTVAL (XEXP (lhs, 1)) >= 0
2032 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2034 coeff0 = double_int_setbit (double_int_zero,
2035 INTVAL (XEXP (lhs, 1)));
2036 lhs = XEXP (lhs, 0);
2039 if (GET_CODE (rhs) == NEG)
2041 negcoeff1 = double_int_one;
2042 rhs = XEXP (rhs, 0);
2044 else if (GET_CODE (rhs) == MULT
2045 && CONST_INT_P (XEXP (rhs, 1)))
2047 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2048 rhs = XEXP (rhs, 0);
2050 else if (GET_CODE (rhs) == ASHIFT
2051 && CONST_INT_P (XEXP (rhs, 1))
2052 && INTVAL (XEXP (rhs, 1)) >= 0
2053 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2055 negcoeff1 = double_int_setbit (double_int_zero,
2056 INTVAL (XEXP (rhs, 1)));
2057 negcoeff1 = double_int_neg (negcoeff1);
2058 rhs = XEXP (rhs, 0);
2061 if (rtx_equal_p (lhs, rhs))
2063 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2064 rtx coeff;
2065 double_int val;
2066 bool speed = optimize_function_for_speed_p (cfun);
2068 val = double_int_add (coeff0, negcoeff1);
2069 coeff = immed_double_int_const (val, mode);
2071 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2072 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2073 ? tem : 0;
2077 /* (a - (-b)) -> (a + b). True even for IEEE. */
2078 if (GET_CODE (op1) == NEG)
2079 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2081 /* (-x - c) may be simplified as (-c - x). */
2082 if (GET_CODE (op0) == NEG
2083 && (CONST_INT_P (op1)
2084 || GET_CODE (op1) == CONST_DOUBLE))
2086 tem = simplify_unary_operation (NEG, mode, op1, mode);
2087 if (tem)
2088 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2091 /* Don't let a relocatable value get a negative coeff. */
2092 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2093 return simplify_gen_binary (PLUS, mode,
2094 op0,
2095 neg_const_int (mode, op1));
2097 /* (x - (x & y)) -> (x & ~y) */
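      /* The bits of (x & y) are a subset of the bits of x, so the
         subtraction simply clears them; e.g. x = 0b1100, y = 0b1010:
         x - (x & y) = 0b1100 - 0b1000 = 0b0100 = x & ~y.  */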
2098 if (GET_CODE (op1) == AND)
2100 if (rtx_equal_p (op0, XEXP (op1, 0)))
2102 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2103 GET_MODE (XEXP (op1, 1)));
2104 return simplify_gen_binary (AND, mode, op0, tem);
2106 if (rtx_equal_p (op0, XEXP (op1, 1)))
2108 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2109 GET_MODE (XEXP (op1, 0)));
2110 return simplify_gen_binary (AND, mode, op0, tem);
2114 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2115 by reversing the comparison code if valid. */
2116 if (STORE_FLAG_VALUE == 1
2117 && trueop0 == const1_rtx
2118 && COMPARISON_P (op1)
2119 && (reversed = reversed_comparison (op1, mode)))
2120 return reversed;
2122 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2123 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2124 && GET_CODE (op1) == MULT
2125 && GET_CODE (XEXP (op1, 0)) == NEG)
2127 rtx in1, in2;
2129 in1 = XEXP (XEXP (op1, 0), 0);
2130 in2 = XEXP (op1, 1);
2131 return simplify_gen_binary (PLUS, mode,
2132 simplify_gen_binary (MULT, mode,
2133 in1, in2),
2134 op0);
2137 /* Canonicalize (minus (neg A) (mult B C)) to
2138 (minus (mult (neg B) C) A). */
2139 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2140 && GET_CODE (op1) == MULT
2141 && GET_CODE (op0) == NEG)
2143 rtx in1, in2;
2145 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2146 in2 = XEXP (op1, 1);
2147 return simplify_gen_binary (MINUS, mode,
2148 simplify_gen_binary (MULT, mode,
2149 in1, in2),
2150 XEXP (op0, 0));
2153 /* If one of the operands is a PLUS or a MINUS, see if we can
2154 simplify this by the associative law. This will, for example,
2155 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2156 Don't use the associative law for floating point.
2157 The inaccuracy makes it nonassociative,
2158 and subtle programs can break if operations are associated. */
2160 if (INTEGRAL_MODE_P (mode)
2161 && (plus_minus_operand_p (op0)
2162 || plus_minus_operand_p (op1))
2163 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2164 return tem;
2165 break;
2167 case MULT:
2168 if (trueop1 == constm1_rtx)
2169 return simplify_gen_unary (NEG, mode, op0, mode);
2171 if (GET_CODE (op0) == NEG)
2173 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2174 if (temp)
2175 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2177 if (GET_CODE (op1) == NEG)
2179 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2180 if (temp)
2181 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2184 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2185 x is NaN, since x * 0 is then also NaN. Nor is it valid
2186 when the mode has signed zeros, since multiplying a negative
2187 number by 0 will give -0, not 0. */
2188 if (!HONOR_NANS (mode)
2189 && !HONOR_SIGNED_ZEROS (mode)
2190 && trueop1 == CONST0_RTX (mode)
2191 && ! side_effects_p (op0))
2192 return op1;
2194 /* In IEEE floating point, x*1 is not equivalent to x for
2195 signalling NaNs. */
2196 if (!HONOR_SNANS (mode)
2197 && trueop1 == CONST1_RTX (mode))
2198 return op0;
2200 /* Convert multiply by constant power of two into shift unless
2201 we are still generating RTL. This test is a kludge. */
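      /* For example, (mult X 8) becomes (ashift X 3), since
         exact_log2 (8) == 3.  */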
2202 if (CONST_INT_P (trueop1)
2203 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2204 /* If the mode is larger than the host word size, and the
2205 uppermost bit is set, then this isn't a power of two due
2206 to implicit sign extension. */
2207 && (width <= HOST_BITS_PER_WIDE_INT
2208 || val != HOST_BITS_PER_WIDE_INT - 1))
2209 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2211 /* Likewise for multipliers wider than a word. */
2212 if (GET_CODE (trueop1) == CONST_DOUBLE
2213 && (GET_MODE (trueop1) == VOIDmode
2214 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2215 && GET_MODE (op0) == mode
2216 && CONST_DOUBLE_LOW (trueop1) == 0
2217 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2218 return simplify_gen_binary (ASHIFT, mode, op0,
2219 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2221 /* x*2 is x+x and x*(-1) is -x */
2222 if (GET_CODE (trueop1) == CONST_DOUBLE
2223 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2224 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2225 && GET_MODE (op0) == mode)
2227 REAL_VALUE_TYPE d;
2228 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2230 if (REAL_VALUES_EQUAL (d, dconst2))
2231 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2233 if (!HONOR_SNANS (mode)
2234 && REAL_VALUES_EQUAL (d, dconstm1))
2235 return simplify_gen_unary (NEG, mode, op0, mode);
2238 /* Optimize -x * -x as x * x. */
2239 if (FLOAT_MODE_P (mode)
2240 && GET_CODE (op0) == NEG
2241 && GET_CODE (op1) == NEG
2242 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2243 && !side_effects_p (XEXP (op0, 0)))
2244 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2246 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2247 if (SCALAR_FLOAT_MODE_P (mode)
2248 && GET_CODE (op0) == ABS
2249 && GET_CODE (op1) == ABS
2250 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2251 && !side_effects_p (XEXP (op0, 0)))
2252 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2254 /* Reassociate multiplication, but for floating point MULTs
2255 only when the user specifies unsafe math optimizations. */
2256 if (! FLOAT_MODE_P (mode)
2257 || flag_unsafe_math_optimizations)
2259 tem = simplify_associative_operation (code, mode, op0, op1);
2260 if (tem)
2261 return tem;
2263 break;
2265 case IOR:
2266 if (trueop1 == CONST0_RTX (mode))
2267 return op0;
2268 if (CONST_INT_P (trueop1)
2269 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2270 == GET_MODE_MASK (mode)))
2271 return op1;
2272 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2273 return op0;
2274 /* A | (~A) -> -1 */
2275 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2276 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2277 && ! side_effects_p (op0)
2278 && SCALAR_INT_MODE_P (mode))
2279 return constm1_rtx;
2281 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2282 if (CONST_INT_P (op1)
2283 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2284 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2285 return op1;
2287 /* Canonicalize (X & C1) | C2. */
2288 if (GET_CODE (op0) == AND
2289 && CONST_INT_P (trueop1)
2290 && CONST_INT_P (XEXP (op0, 1)))
2292 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2293 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2294 HOST_WIDE_INT c2 = INTVAL (trueop1);
2296 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2297 if ((c1 & c2) == c1
2298 && !side_effects_p (XEXP (op0, 0)))
2299 return trueop1;
2301 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2302 if (((c1|c2) & mask) == mask)
2303 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2305 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
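         /* E.g. (ior (and X 0x0f) 0x03) becomes (ior (and X 0x0c) 0x03);
            bits that C2 already forces to 1 need not be kept in C1.  */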
2306 if (((c1 & ~c2) & mask) != (c1 & mask))
2308 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2309 gen_int_mode (c1 & ~c2, mode));
2310 return simplify_gen_binary (IOR, mode, tem, op1);
2314 /* Convert (A & B) | A to A. */
2315 if (GET_CODE (op0) == AND
2316 && (rtx_equal_p (XEXP (op0, 0), op1)
2317 || rtx_equal_p (XEXP (op0, 1), op1))
2318 && ! side_effects_p (XEXP (op0, 0))
2319 && ! side_effects_p (XEXP (op0, 1)))
2320 return op1;
2322 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2323 mode size to (rotate A CX). */
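      /* For example, in a 32-bit mode (ior (ashift A 8) (lshiftrt A 24))
         becomes (rotate A 8), since 8 + 24 == 32.  */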
2325 if (GET_CODE (op1) == ASHIFT
2326 || GET_CODE (op1) == SUBREG)
2328 opleft = op1;
2329 opright = op0;
2331 else
2333 opright = op1;
2334 opleft = op0;
2337 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2338 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2339 && CONST_INT_P (XEXP (opleft, 1))
2340 && CONST_INT_P (XEXP (opright, 1))
2341 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2342 == GET_MODE_BITSIZE (mode)))
2343 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2345 /* Same, but for ashift that has been "simplified" to a wider mode
2346 by simplify_shift_const. */
2348 if (GET_CODE (opleft) == SUBREG
2349 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2350 && GET_CODE (opright) == LSHIFTRT
2351 && GET_CODE (XEXP (opright, 0)) == SUBREG
2352 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2353 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2354 && (GET_MODE_SIZE (GET_MODE (opleft))
2355 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2356 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2357 SUBREG_REG (XEXP (opright, 0)))
2358 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2359 && CONST_INT_P (XEXP (opright, 1))
2360 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2361 == GET_MODE_BITSIZE (mode)))
2362 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2363 XEXP (SUBREG_REG (opleft), 1));
 2365 /* If we have (ior (and X C1) C2), simplify this by making
2366 C1 as small as possible if C1 actually changes. */
2367 if (CONST_INT_P (op1)
2368 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2369 || INTVAL (op1) > 0)
2370 && GET_CODE (op0) == AND
2371 && CONST_INT_P (XEXP (op0, 1))
2372 && CONST_INT_P (op1)
2373 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2374 return simplify_gen_binary (IOR, mode,
2375 simplify_gen_binary
2376 (AND, mode, XEXP (op0, 0),
2377 GEN_INT (UINTVAL (XEXP (op0, 1))
2378 & ~UINTVAL (op1))),
2379 op1);
2381 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
 2382 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the PLUS
 2383 does not affect any of the bits in OP1, then we can do
2384 the IOR as a PLUS and we can associate. This is valid if OP1
2385 can be safely shifted left C bits. */
2386 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2387 && GET_CODE (XEXP (op0, 0)) == PLUS
2388 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2389 && CONST_INT_P (XEXP (op0, 1))
2390 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2392 int count = INTVAL (XEXP (op0, 1));
2393 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2395 if (mask >> count == INTVAL (trueop1)
2396 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2397 return simplify_gen_binary (ASHIFTRT, mode,
2398 plus_constant (XEXP (op0, 0), mask),
2399 XEXP (op0, 1));
2402 tem = simplify_associative_operation (code, mode, op0, op1);
2403 if (tem)
2404 return tem;
2405 break;
2407 case XOR:
2408 if (trueop1 == CONST0_RTX (mode))
2409 return op0;
2410 if (CONST_INT_P (trueop1)
2411 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2412 == GET_MODE_MASK (mode)))
2413 return simplify_gen_unary (NOT, mode, op0, mode);
2414 if (rtx_equal_p (trueop0, trueop1)
2415 && ! side_effects_p (op0)
2416 && GET_MODE_CLASS (mode) != MODE_CC)
2417 return CONST0_RTX (mode);
2419 /* Canonicalize XOR of the most significant bit to PLUS. */
2420 if ((CONST_INT_P (op1)
2421 || GET_CODE (op1) == CONST_DOUBLE)
2422 && mode_signbit_p (mode, op1))
2423 return simplify_gen_binary (PLUS, mode, op0, op1);
2424 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2425 if ((CONST_INT_P (op1)
2426 || GET_CODE (op1) == CONST_DOUBLE)
2427 && GET_CODE (op0) == PLUS
2428 && (CONST_INT_P (XEXP (op0, 1))
2429 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2430 && mode_signbit_p (mode, XEXP (op0, 1)))
2431 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2432 simplify_gen_binary (XOR, mode, op1,
2433 XEXP (op0, 1)));
2435 /* If we are XORing two things that have no bits in common,
2436 convert them into an IOR. This helps to detect rotation encoded
2437 using those methods and possibly other simplifications. */
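      /* E.g. if nonzero_bits shows op0 can only have bits in 0xff00 and
         op1 only in 0x00ff, the two never have a 1 bit in common, so the
         XOR computes the same value as an IOR.  */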
2439 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2440 && (nonzero_bits (op0, mode)
2441 & nonzero_bits (op1, mode)) == 0)
2442 return (simplify_gen_binary (IOR, mode, op0, op1));
2444 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2445 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2446 (NOT y). */
2448 int num_negated = 0;
2450 if (GET_CODE (op0) == NOT)
2451 num_negated++, op0 = XEXP (op0, 0);
2452 if (GET_CODE (op1) == NOT)
2453 num_negated++, op1 = XEXP (op1, 0);
2455 if (num_negated == 2)
2456 return simplify_gen_binary (XOR, mode, op0, op1);
2457 else if (num_negated == 1)
2458 return simplify_gen_unary (NOT, mode,
2459 simplify_gen_binary (XOR, mode, op0, op1),
2460 mode);
2463 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2464 correspond to a machine insn or result in further simplifications
2465 if B is a constant. */
2467 if (GET_CODE (op0) == AND
2468 && rtx_equal_p (XEXP (op0, 1), op1)
2469 && ! side_effects_p (op1))
2470 return simplify_gen_binary (AND, mode,
2471 simplify_gen_unary (NOT, mode,
2472 XEXP (op0, 0), mode),
2473 op1);
2475 else if (GET_CODE (op0) == AND
2476 && rtx_equal_p (XEXP (op0, 0), op1)
2477 && ! side_effects_p (op1))
2478 return simplify_gen_binary (AND, mode,
2479 simplify_gen_unary (NOT, mode,
2480 XEXP (op0, 1), mode),
2481 op1);
2483 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2484 comparison if STORE_FLAG_VALUE is 1. */
2485 if (STORE_FLAG_VALUE == 1
2486 && trueop1 == const1_rtx
2487 && COMPARISON_P (op0)
2488 && (reversed = reversed_comparison (op0, mode)))
2489 return reversed;
2491 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2492 is (lt foo (const_int 0)), so we can perform the above
2493 simplification if STORE_FLAG_VALUE is 1. */
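      /* For a 32-bit FOO, (lshiftrt FOO 31) is 1 exactly when FOO is
         negative; xoring that with 1 therefore gives (ge FOO 0), which is
         what is generated below.  */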
2495 if (STORE_FLAG_VALUE == 1
2496 && trueop1 == const1_rtx
2497 && GET_CODE (op0) == LSHIFTRT
2498 && CONST_INT_P (XEXP (op0, 1))
2499 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2500 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2502 /* (xor (comparison foo bar) (const_int sign-bit))
2503 when STORE_FLAG_VALUE is the sign bit. */
2504 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2505 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2506 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2507 && trueop1 == const_true_rtx
2508 && COMPARISON_P (op0)
2509 && (reversed = reversed_comparison (op0, mode)))
2510 return reversed;
2512 tem = simplify_associative_operation (code, mode, op0, op1);
2513 if (tem)
2514 return tem;
2515 break;
2517 case AND:
2518 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2519 return trueop1;
2520 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2522 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2523 HOST_WIDE_INT nzop1;
2524 if (CONST_INT_P (trueop1))
2526 HOST_WIDE_INT val1 = INTVAL (trueop1);
2527 /* If we are turning off bits already known off in OP0, we need
2528 not do an AND. */
2529 if ((nzop0 & ~val1) == 0)
2530 return op0;
2532 nzop1 = nonzero_bits (trueop1, mode);
2533 /* If we are clearing all the nonzero bits, the result is zero. */
2534 if ((nzop1 & nzop0) == 0
2535 && !side_effects_p (op0) && !side_effects_p (op1))
2536 return CONST0_RTX (mode);
2538 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2539 && GET_MODE_CLASS (mode) != MODE_CC)
2540 return op0;
2541 /* A & (~A) -> 0 */
2542 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2543 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2544 && ! side_effects_p (op0)
2545 && GET_MODE_CLASS (mode) != MODE_CC)
2546 return CONST0_RTX (mode);
2548 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2549 there are no nonzero bits of C outside of X's mode. */
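      /* E.g. (and (sign_extend:SI X:QI) 0x7f) can become
         (zero_extend:SI (and:QI X 0x7f)), because 0x7f has no bits
         outside QImode.  */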
2550 if ((GET_CODE (op0) == SIGN_EXTEND
2551 || GET_CODE (op0) == ZERO_EXTEND)
2552 && CONST_INT_P (trueop1)
2553 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2554 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2555 & UINTVAL (trueop1)) == 0)
2557 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2558 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2559 gen_int_mode (INTVAL (trueop1),
2560 imode));
2561 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2564 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2565 we might be able to further simplify the AND with X and potentially
2566 remove the truncation altogether. */
2567 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2569 rtx x = XEXP (op0, 0);
2570 enum machine_mode xmode = GET_MODE (x);
2571 tem = simplify_gen_binary (AND, xmode, x,
2572 gen_int_mode (INTVAL (trueop1), xmode));
2573 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2576 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2577 if (GET_CODE (op0) == IOR
2578 && CONST_INT_P (trueop1)
2579 && CONST_INT_P (XEXP (op0, 1)))
2581 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2582 return simplify_gen_binary (IOR, mode,
2583 simplify_gen_binary (AND, mode,
2584 XEXP (op0, 0), op1),
2585 gen_int_mode (tmp, mode));
2588 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2589 insn (and may simplify more). */
2590 if (GET_CODE (op0) == XOR
2591 && rtx_equal_p (XEXP (op0, 0), op1)
2592 && ! side_effects_p (op1))
2593 return simplify_gen_binary (AND, mode,
2594 simplify_gen_unary (NOT, mode,
2595 XEXP (op0, 1), mode),
2596 op1);
2598 if (GET_CODE (op0) == XOR
2599 && rtx_equal_p (XEXP (op0, 1), op1)
2600 && ! side_effects_p (op1))
2601 return simplify_gen_binary (AND, mode,
2602 simplify_gen_unary (NOT, mode,
2603 XEXP (op0, 0), mode),
2604 op1);
2606 /* Similarly for (~(A ^ B)) & A. */
2607 if (GET_CODE (op0) == NOT
2608 && GET_CODE (XEXP (op0, 0)) == XOR
2609 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2610 && ! side_effects_p (op1))
2611 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2613 if (GET_CODE (op0) == NOT
2614 && GET_CODE (XEXP (op0, 0)) == XOR
2615 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2616 && ! side_effects_p (op1))
2617 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2619 /* Convert (A | B) & A to A. */
2620 if (GET_CODE (op0) == IOR
2621 && (rtx_equal_p (XEXP (op0, 0), op1)
2622 || rtx_equal_p (XEXP (op0, 1), op1))
2623 && ! side_effects_p (XEXP (op0, 0))
2624 && ! side_effects_p (XEXP (op0, 1)))
2625 return op1;
2627 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2628 ((A & N) + B) & M -> (A + B) & M
2629 Similarly if (N & M) == 0,
2630 ((A | N) + B) & M -> (A + B) & M
2631 and for - instead of + and/or ^ instead of |.
2632 Also, if (N & M) == 0, then
2633 (A +- N) & M -> A & M. */
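      /* For example, with M == 0x0f and N == 0xf0 we have (N & M) == 0,
         so ((A | 0xf0) + B) & 0x0f is (A + B) & 0x0f: setting bits above
         the mask can only change the sum at or above those positions,
         never below the mask.  */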
2634 if (CONST_INT_P (trueop1)
2635 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2636 && ~UINTVAL (trueop1)
2637 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2638 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2640 rtx pmop[2];
2641 int which;
2643 pmop[0] = XEXP (op0, 0);
2644 pmop[1] = XEXP (op0, 1);
2646 if (CONST_INT_P (pmop[1])
2647 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2648 return simplify_gen_binary (AND, mode, pmop[0], op1);
2650 for (which = 0; which < 2; which++)
2652 tem = pmop[which];
2653 switch (GET_CODE (tem))
2655 case AND:
2656 if (CONST_INT_P (XEXP (tem, 1))
2657 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2658 == UINTVAL (trueop1))
2659 pmop[which] = XEXP (tem, 0);
2660 break;
2661 case IOR:
2662 case XOR:
2663 if (CONST_INT_P (XEXP (tem, 1))
2664 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2665 pmop[which] = XEXP (tem, 0);
2666 break;
2667 default:
2668 break;
2672 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2674 tem = simplify_gen_binary (GET_CODE (op0), mode,
2675 pmop[0], pmop[1]);
2676 return simplify_gen_binary (code, mode, tem, op1);
 2680 /* (and X (ior (not X) Y)) -> (and X Y).  */
2681 if (GET_CODE (op1) == IOR
2682 && GET_CODE (XEXP (op1, 0)) == NOT
2683 && op0 == XEXP (XEXP (op1, 0), 0))
2684 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2686 /* (and (ior (not X) Y) X) -> (and X Y) */
2687 if (GET_CODE (op0) == IOR
2688 && GET_CODE (XEXP (op0, 0)) == NOT
2689 && op1 == XEXP (XEXP (op0, 0), 0))
2690 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2692 tem = simplify_associative_operation (code, mode, op0, op1);
2693 if (tem)
2694 return tem;
2695 break;
2697 case UDIV:
2698 /* 0/x is 0 (or x&0 if x has side-effects). */
2699 if (trueop0 == CONST0_RTX (mode))
2701 if (side_effects_p (op1))
2702 return simplify_gen_binary (AND, mode, op1, trueop0);
2703 return trueop0;
2705 /* x/1 is x. */
2706 if (trueop1 == CONST1_RTX (mode))
2707 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2708 /* Convert divide by power of two into shift. */
2709 if (CONST_INT_P (trueop1)
2710 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2711 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2712 break;
2714 case DIV:
2715 /* Handle floating point and integers separately. */
2716 if (SCALAR_FLOAT_MODE_P (mode))
2718 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2719 safe for modes with NaNs, since 0.0 / 0.0 will then be
2720 NaN rather than 0.0. Nor is it safe for modes with signed
 2721 zeros, since dividing 0 by a negative number gives -0.0  */
2722 if (trueop0 == CONST0_RTX (mode)
2723 && !HONOR_NANS (mode)
2724 && !HONOR_SIGNED_ZEROS (mode)
2725 && ! side_effects_p (op1))
2726 return op0;
2727 /* x/1.0 is x. */
2728 if (trueop1 == CONST1_RTX (mode)
2729 && !HONOR_SNANS (mode))
2730 return op0;
2732 if (GET_CODE (trueop1) == CONST_DOUBLE
2733 && trueop1 != CONST0_RTX (mode))
2735 REAL_VALUE_TYPE d;
2736 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2738 /* x/-1.0 is -x. */
2739 if (REAL_VALUES_EQUAL (d, dconstm1)
2740 && !HONOR_SNANS (mode))
2741 return simplify_gen_unary (NEG, mode, op0, mode);
2743 /* Change FP division by a constant into multiplication.
2744 Only do this with -freciprocal-math. */
2745 if (flag_reciprocal_math
2746 && !REAL_VALUES_EQUAL (d, dconst0))
2748 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2749 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2750 return simplify_gen_binary (MULT, mode, op0, tem);
2754 else
2756 /* 0/x is 0 (or x&0 if x has side-effects). */
2757 if (trueop0 == CONST0_RTX (mode)
2758 && !cfun->can_throw_non_call_exceptions)
2760 if (side_effects_p (op1))
2761 return simplify_gen_binary (AND, mode, op1, trueop0);
2762 return trueop0;
2764 /* x/1 is x. */
2765 if (trueop1 == CONST1_RTX (mode))
2766 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2767 /* x/-1 is -x. */
2768 if (trueop1 == constm1_rtx)
2770 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2771 return simplify_gen_unary (NEG, mode, x, mode);
2774 break;
2776 case UMOD:
2777 /* 0%x is 0 (or x&0 if x has side-effects). */
2778 if (trueop0 == CONST0_RTX (mode))
2780 if (side_effects_p (op1))
2781 return simplify_gen_binary (AND, mode, op1, trueop0);
2782 return trueop0;
 2784 /* x%1 is 0 (or x&0 if x has side-effects). */
2785 if (trueop1 == CONST1_RTX (mode))
2787 if (side_effects_p (op0))
2788 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2789 return CONST0_RTX (mode);
2791 /* Implement modulus by power of two as AND. */
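      /* E.g. (umod X 8) becomes (and X 7).  */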
2792 if (CONST_INT_P (trueop1)
2793 && exact_log2 (UINTVAL (trueop1)) > 0)
2794 return simplify_gen_binary (AND, mode, op0,
2795 GEN_INT (INTVAL (op1) - 1));
2796 break;
2798 case MOD:
2799 /* 0%x is 0 (or x&0 if x has side-effects). */
2800 if (trueop0 == CONST0_RTX (mode))
2802 if (side_effects_p (op1))
2803 return simplify_gen_binary (AND, mode, op1, trueop0);
2804 return trueop0;
2806 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2807 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2809 if (side_effects_p (op0))
2810 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2811 return CONST0_RTX (mode);
2813 break;
2815 case ROTATERT:
2816 case ROTATE:
2817 case ASHIFTRT:
2818 if (trueop1 == CONST0_RTX (mode))
2819 return op0;
2820 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2821 return op0;
2822 /* Rotating ~0 always results in ~0. */
2823 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2824 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2825 && ! side_effects_p (op1))
2826 return op0;
2827 canonicalize_shift:
2828 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2830 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2831 if (val != INTVAL (op1))
2832 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2834 break;
2836 case ASHIFT:
2837 case SS_ASHIFT:
2838 case US_ASHIFT:
2839 if (trueop1 == CONST0_RTX (mode))
2840 return op0;
2841 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2842 return op0;
2843 goto canonicalize_shift;
2845 case LSHIFTRT:
2846 if (trueop1 == CONST0_RTX (mode))
2847 return op0;
2848 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2849 return op0;
2850 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
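      /* For example, in SImode with CLZ_DEFINED_VALUE_AT_ZERO giving 32,
         (clz X) ranges over 0..32, so (lshiftrt (clz X) 5) is 1 exactly
         when (clz X) == 32, i.e. when X == 0.  */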
2851 if (GET_CODE (op0) == CLZ
2852 && CONST_INT_P (trueop1)
2853 && STORE_FLAG_VALUE == 1
2854 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2856 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2857 unsigned HOST_WIDE_INT zero_val = 0;
2859 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2860 && zero_val == GET_MODE_BITSIZE (imode)
2861 && INTVAL (trueop1) == exact_log2 (zero_val))
2862 return simplify_gen_relational (EQ, mode, imode,
2863 XEXP (op0, 0), const0_rtx);
2865 goto canonicalize_shift;
2867 case SMIN:
2868 if (width <= HOST_BITS_PER_WIDE_INT
2869 && CONST_INT_P (trueop1)
2870 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
2871 && ! side_effects_p (op0))
2872 return op1;
2873 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2874 return op0;
2875 tem = simplify_associative_operation (code, mode, op0, op1);
2876 if (tem)
2877 return tem;
2878 break;
2880 case SMAX:
2881 if (width <= HOST_BITS_PER_WIDE_INT
2882 && CONST_INT_P (trueop1)
2883 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2884 && ! side_effects_p (op0))
2885 return op1;
2886 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2887 return op0;
2888 tem = simplify_associative_operation (code, mode, op0, op1);
2889 if (tem)
2890 return tem;
2891 break;
2893 case UMIN:
2894 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2895 return op1;
2896 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2897 return op0;
2898 tem = simplify_associative_operation (code, mode, op0, op1);
2899 if (tem)
2900 return tem;
2901 break;
2903 case UMAX:
2904 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2905 return op1;
2906 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2907 return op0;
2908 tem = simplify_associative_operation (code, mode, op0, op1);
2909 if (tem)
2910 return tem;
2911 break;
2913 case SS_PLUS:
2914 case US_PLUS:
2915 case SS_MINUS:
2916 case US_MINUS:
2917 case SS_MULT:
2918 case US_MULT:
2919 case SS_DIV:
2920 case US_DIV:
2921 /* ??? There are simplifications that can be done. */
2922 return 0;
2924 case VEC_SELECT:
2925 if (!VECTOR_MODE_P (mode))
2927 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2928 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2929 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2930 gcc_assert (XVECLEN (trueop1, 0) == 1);
2931 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2933 if (GET_CODE (trueop0) == CONST_VECTOR)
2934 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2935 (trueop1, 0, 0)));
2937 /* Extract a scalar element from a nested VEC_SELECT expression
2938 (with optional nested VEC_CONCAT expression). Some targets
 2939 (i386) extract a scalar element from a vector using a chain of
 2940 nested VEC_SELECT expressions. When the input operand is a memory
 2941 operand, this operation can be simplified to a simple scalar
 2942 load from an offset memory address. */
2943 if (GET_CODE (trueop0) == VEC_SELECT)
2945 rtx op0 = XEXP (trueop0, 0);
2946 rtx op1 = XEXP (trueop0, 1);
2948 enum machine_mode opmode = GET_MODE (op0);
2949 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2950 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2952 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2953 int elem;
2955 rtvec vec;
2956 rtx tmp_op, tmp;
2958 gcc_assert (GET_CODE (op1) == PARALLEL);
2959 gcc_assert (i < n_elts);
 2962 /* Select the element pointed to by the nested selector. */
2962 elem = INTVAL (XVECEXP (op1, 0, i));
2964 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2965 if (GET_CODE (op0) == VEC_CONCAT)
2967 rtx op00 = XEXP (op0, 0);
2968 rtx op01 = XEXP (op0, 1);
2970 enum machine_mode mode00, mode01;
2971 int n_elts00, n_elts01;
2973 mode00 = GET_MODE (op00);
2974 mode01 = GET_MODE (op01);
 2976 /* Find out the number of elements of each operand. */
2977 if (VECTOR_MODE_P (mode00))
2979 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2980 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2982 else
2983 n_elts00 = 1;
2985 if (VECTOR_MODE_P (mode01))
2987 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2988 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2990 else
2991 n_elts01 = 1;
2993 gcc_assert (n_elts == n_elts00 + n_elts01);
 2995 /* Select the correct operand of VEC_CONCAT
 2996 and adjust the selector. */
2997 if (elem < n_elts01)
2998 tmp_op = op00;
2999 else
3001 tmp_op = op01;
3002 elem -= n_elts00;
3005 else
3006 tmp_op = op0;
3008 vec = rtvec_alloc (1);
3009 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3011 tmp = gen_rtx_fmt_ee (code, mode,
3012 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3013 return tmp;
3015 if (GET_CODE (trueop0) == VEC_DUPLICATE
3016 && GET_MODE (XEXP (trueop0, 0)) == mode)
3017 return XEXP (trueop0, 0);
3019 else
3021 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3022 gcc_assert (GET_MODE_INNER (mode)
3023 == GET_MODE_INNER (GET_MODE (trueop0)));
3024 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3026 if (GET_CODE (trueop0) == CONST_VECTOR)
3028 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3029 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3030 rtvec v = rtvec_alloc (n_elts);
3031 unsigned int i;
3033 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3034 for (i = 0; i < n_elts; i++)
3036 rtx x = XVECEXP (trueop1, 0, i);
3038 gcc_assert (CONST_INT_P (x));
3039 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3040 INTVAL (x));
3043 return gen_rtx_CONST_VECTOR (mode, v);
3047 if (XVECLEN (trueop1, 0) == 1
3048 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3049 && GET_CODE (trueop0) == VEC_CONCAT)
3051 rtx vec = trueop0;
3052 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3054 /* Try to find the element in the VEC_CONCAT. */
3055 while (GET_MODE (vec) != mode
3056 && GET_CODE (vec) == VEC_CONCAT)
3058 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3059 if (offset < vec_size)
3060 vec = XEXP (vec, 0);
3061 else
3063 offset -= vec_size;
3064 vec = XEXP (vec, 1);
3066 vec = avoid_constant_pool_reference (vec);
3069 if (GET_MODE (vec) == mode)
3070 return vec;
3073 return 0;
3074 case VEC_CONCAT:
3076 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3077 ? GET_MODE (trueop0)
3078 : GET_MODE_INNER (mode));
3079 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3080 ? GET_MODE (trueop1)
3081 : GET_MODE_INNER (mode));
3083 gcc_assert (VECTOR_MODE_P (mode));
3084 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3085 == GET_MODE_SIZE (mode));
3087 if (VECTOR_MODE_P (op0_mode))
3088 gcc_assert (GET_MODE_INNER (mode)
3089 == GET_MODE_INNER (op0_mode));
3090 else
3091 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3093 if (VECTOR_MODE_P (op1_mode))
3094 gcc_assert (GET_MODE_INNER (mode)
3095 == GET_MODE_INNER (op1_mode));
3096 else
3097 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3099 if ((GET_CODE (trueop0) == CONST_VECTOR
3100 || CONST_INT_P (trueop0)
3101 || GET_CODE (trueop0) == CONST_DOUBLE)
3102 && (GET_CODE (trueop1) == CONST_VECTOR
3103 || CONST_INT_P (trueop1)
3104 || GET_CODE (trueop1) == CONST_DOUBLE))
3106 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3107 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3108 rtvec v = rtvec_alloc (n_elts);
3109 unsigned int i;
3110 unsigned in_n_elts = 1;
3112 if (VECTOR_MODE_P (op0_mode))
3113 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3114 for (i = 0; i < n_elts; i++)
3116 if (i < in_n_elts)
3118 if (!VECTOR_MODE_P (op0_mode))
3119 RTVEC_ELT (v, i) = trueop0;
3120 else
3121 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3123 else
3125 if (!VECTOR_MODE_P (op1_mode))
3126 RTVEC_ELT (v, i) = trueop1;
3127 else
3128 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3129 i - in_n_elts);
3133 return gen_rtx_CONST_VECTOR (mode, v);
3136 return 0;
3138 default:
3139 gcc_unreachable ();
3142 return 0;
3146 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3147 rtx op0, rtx op1)
3149 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3150 HOST_WIDE_INT val;
3151 unsigned int width = GET_MODE_BITSIZE (mode);
3153 if (VECTOR_MODE_P (mode)
3154 && code != VEC_CONCAT
3155 && GET_CODE (op0) == CONST_VECTOR
3156 && GET_CODE (op1) == CONST_VECTOR)
3158 unsigned n_elts = GET_MODE_NUNITS (mode);
3159 enum machine_mode op0mode = GET_MODE (op0);
3160 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3161 enum machine_mode op1mode = GET_MODE (op1);
3162 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3163 rtvec v = rtvec_alloc (n_elts);
3164 unsigned int i;
3166 gcc_assert (op0_n_elts == n_elts);
3167 gcc_assert (op1_n_elts == n_elts);
3168 for (i = 0; i < n_elts; i++)
3170 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3171 CONST_VECTOR_ELT (op0, i),
3172 CONST_VECTOR_ELT (op1, i));
3173 if (!x)
3174 return 0;
3175 RTVEC_ELT (v, i) = x;
3178 return gen_rtx_CONST_VECTOR (mode, v);
3181 if (VECTOR_MODE_P (mode)
3182 && code == VEC_CONCAT
3183 && (CONST_INT_P (op0)
3184 || GET_CODE (op0) == CONST_DOUBLE
3185 || GET_CODE (op0) == CONST_FIXED)
3186 && (CONST_INT_P (op1)
3187 || GET_CODE (op1) == CONST_DOUBLE
3188 || GET_CODE (op1) == CONST_FIXED))
3190 unsigned n_elts = GET_MODE_NUNITS (mode);
3191 rtvec v = rtvec_alloc (n_elts);
3193 gcc_assert (n_elts >= 2);
3194 if (n_elts == 2)
3196 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3197 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3199 RTVEC_ELT (v, 0) = op0;
3200 RTVEC_ELT (v, 1) = op1;
3202 else
3204 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3205 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3206 unsigned i;
3208 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3209 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3210 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3212 for (i = 0; i < op0_n_elts; ++i)
3213 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3214 for (i = 0; i < op1_n_elts; ++i)
3215 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3218 return gen_rtx_CONST_VECTOR (mode, v);
3221 if (SCALAR_FLOAT_MODE_P (mode)
3222 && GET_CODE (op0) == CONST_DOUBLE
3223 && GET_CODE (op1) == CONST_DOUBLE
3224 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3226 if (code == AND
3227 || code == IOR
3228 || code == XOR)
3230 long tmp0[4];
3231 long tmp1[4];
3232 REAL_VALUE_TYPE r;
3233 int i;
3235 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3236 GET_MODE (op0));
3237 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3238 GET_MODE (op1));
3239 for (i = 0; i < 4; i++)
3241 switch (code)
3243 case AND:
3244 tmp0[i] &= tmp1[i];
3245 break;
3246 case IOR:
3247 tmp0[i] |= tmp1[i];
3248 break;
3249 case XOR:
3250 tmp0[i] ^= tmp1[i];
3251 break;
3252 default:
3253 gcc_unreachable ();
3256 real_from_target (&r, tmp0, mode);
3257 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3259 else
3261 REAL_VALUE_TYPE f0, f1, value, result;
3262 bool inexact;
3264 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3265 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3266 real_convert (&f0, mode, &f0);
3267 real_convert (&f1, mode, &f1);
3269 if (HONOR_SNANS (mode)
3270 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3271 return 0;
3273 if (code == DIV
3274 && REAL_VALUES_EQUAL (f1, dconst0)
3275 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3276 return 0;
3278 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3279 && flag_trapping_math
3280 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3282 int s0 = REAL_VALUE_NEGATIVE (f0);
3283 int s1 = REAL_VALUE_NEGATIVE (f1);
3285 switch (code)
3287 case PLUS:
3288 /* Inf + -Inf = NaN plus exception. */
3289 if (s0 != s1)
3290 return 0;
3291 break;
3292 case MINUS:
3293 /* Inf - Inf = NaN plus exception. */
3294 if (s0 == s1)
3295 return 0;
3296 break;
3297 case DIV:
3298 /* Inf / Inf = NaN plus exception. */
3299 return 0;
3300 default:
3301 break;
3305 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3306 && flag_trapping_math
3307 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3308 || (REAL_VALUE_ISINF (f1)
3309 && REAL_VALUES_EQUAL (f0, dconst0))))
3310 /* Inf * 0 = NaN plus exception. */
3311 return 0;
3313 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3314 &f0, &f1);
3315 real_convert (&result, mode, &value);
3317 /* Don't constant fold this floating point operation if
 3318 the result has overflowed and flag_trapping_math is set. */
3320 if (flag_trapping_math
3321 && MODE_HAS_INFINITIES (mode)
3322 && REAL_VALUE_ISINF (result)
3323 && !REAL_VALUE_ISINF (f0)
3324 && !REAL_VALUE_ISINF (f1))
3325 /* Overflow plus exception. */
3326 return 0;
3328 /* Don't constant fold this floating point operation if the
 3329 result may depend upon the run-time rounding mode and
3330 flag_rounding_math is set, or if GCC's software emulation
3331 is unable to accurately represent the result. */
3333 if ((flag_rounding_math
3334 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3335 && (inexact || !real_identical (&result, &value)))
3336 return NULL_RTX;
3338 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3342 /* We can fold some multi-word operations. */
3343 if (GET_MODE_CLASS (mode) == MODE_INT
3344 && width == HOST_BITS_PER_DOUBLE_INT
3345 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3346 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3348 double_int o0, o1, res, tmp;
3350 o0 = rtx_to_double_int (op0);
3351 o1 = rtx_to_double_int (op1);
3353 switch (code)
3355 case MINUS:
3356 /* A - B == A + (-B). */
3357 o1 = double_int_neg (o1);
3359 /* Fall through.... */
3361 case PLUS:
3362 res = double_int_add (o0, o1);
3363 break;
3365 case MULT:
3366 res = double_int_mul (o0, o1);
3367 break;
3369 case DIV:
3370 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3371 o0.low, o0.high, o1.low, o1.high,
3372 &res.low, &res.high,
3373 &tmp.low, &tmp.high))
3374 return 0;
3375 break;
3377 case MOD:
3378 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3379 o0.low, o0.high, o1.low, o1.high,
3380 &tmp.low, &tmp.high,
3381 &res.low, &res.high))
3382 return 0;
3383 break;
3385 case UDIV:
3386 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3387 o0.low, o0.high, o1.low, o1.high,
3388 &res.low, &res.high,
3389 &tmp.low, &tmp.high))
3390 return 0;
3391 break;
3393 case UMOD:
3394 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3395 o0.low, o0.high, o1.low, o1.high,
3396 &tmp.low, &tmp.high,
3397 &res.low, &res.high))
3398 return 0;
3399 break;
3401 case AND:
3402 res = double_int_and (o0, o1);
3403 break;
3405 case IOR:
3406 res = double_int_ior (o0, o1);
3407 break;
3409 case XOR:
3410 res = double_int_xor (o0, o1);
3411 break;
3413 case SMIN:
3414 res = double_int_smin (o0, o1);
3415 break;
3417 case SMAX:
3418 res = double_int_smax (o0, o1);
3419 break;
3421 case UMIN:
3422 res = double_int_umin (o0, o1);
3423 break;
3425 case UMAX:
3426 res = double_int_umax (o0, o1);
3427 break;
3429 case LSHIFTRT: case ASHIFTRT:
3430 case ASHIFT:
3431 case ROTATE: case ROTATERT:
3433 unsigned HOST_WIDE_INT cnt;
3435 if (SHIFT_COUNT_TRUNCATED)
3436 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3438 if (!double_int_fits_in_uhwi_p (o1)
3439 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3440 return 0;
3442 cnt = double_int_to_uhwi (o1);
3444 if (code == LSHIFTRT || code == ASHIFTRT)
3445 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3446 code == ASHIFTRT);
3447 else if (code == ASHIFT)
3448 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3449 true);
3450 else if (code == ROTATE)
3451 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3452 else /* code == ROTATERT */
3453 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3455 break;
3457 default:
3458 return 0;
3461 return immed_double_int_const (res, mode);
3464 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3465 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3467 /* Get the integer argument values in two forms:
3468 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3470 arg0 = INTVAL (op0);
3471 arg1 = INTVAL (op1);
3473 if (width < HOST_BITS_PER_WIDE_INT)
3475 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3476 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3478 arg0s = arg0;
3479 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3480 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3482 arg1s = arg1;
3483 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3484 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3486 else
3488 arg0s = arg0;
3489 arg1s = arg1;
3492 /* Compute the value of the arithmetic. */
3494 switch (code)
3496 case PLUS:
3497 val = arg0s + arg1s;
3498 break;
3500 case MINUS:
3501 val = arg0s - arg1s;
3502 break;
3504 case MULT:
3505 val = arg0s * arg1s;
3506 break;
3508 case DIV:
3509 if (arg1s == 0
3510 || ((unsigned HOST_WIDE_INT) arg0s
3511 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3512 && arg1s == -1))
3513 return 0;
3514 val = arg0s / arg1s;
3515 break;
3517 case MOD:
3518 if (arg1s == 0
3519 || ((unsigned HOST_WIDE_INT) arg0s
3520 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3521 && arg1s == -1))
3522 return 0;
3523 val = arg0s % arg1s;
3524 break;
3526 case UDIV:
3527 if (arg1 == 0
3528 || ((unsigned HOST_WIDE_INT) arg0s
3529 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3530 && arg1s == -1))
3531 return 0;
3532 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3533 break;
3535 case UMOD:
3536 if (arg1 == 0
3537 || ((unsigned HOST_WIDE_INT) arg0s
3538 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3539 && arg1s == -1))
3540 return 0;
3541 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3542 break;
3544 case AND:
3545 val = arg0 & arg1;
3546 break;
3548 case IOR:
3549 val = arg0 | arg1;
3550 break;
3552 case XOR:
3553 val = arg0 ^ arg1;
3554 break;
3556 case LSHIFTRT:
3557 case ASHIFT:
3558 case ASHIFTRT:
3559 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3560 the value is in range. We can't return any old value for
3561 out-of-range arguments because either the middle-end (via
3562 shift_truncation_mask) or the back-end might be relying on
3563 target-specific knowledge. Nor can we rely on
3564 shift_truncation_mask, since the shift might not be part of an
3565 ashlM3, lshrM3 or ashrM3 instruction. */
3566 if (SHIFT_COUNT_TRUNCATED)
3567 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3568 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3569 return 0;
3571 val = (code == ASHIFT
3572 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3573 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3575 /* Sign-extend the result for arithmetic right shifts. */
3576 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3577 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3578 break;
3580 case ROTATERT:
3581 if (arg1 < 0)
3582 return 0;
3584 arg1 %= width;
3585 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3586 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3587 break;
3589 case ROTATE:
3590 if (arg1 < 0)
3591 return 0;
3593 arg1 %= width;
3594 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3595 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3596 break;
3598 case COMPARE:
3599 /* Do nothing here. */
3600 return 0;
3602 case SMIN:
3603 val = arg0s <= arg1s ? arg0s : arg1s;
3604 break;
3606 case UMIN:
3607 val = ((unsigned HOST_WIDE_INT) arg0
3608 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3609 break;
3611 case SMAX:
3612 val = arg0s > arg1s ? arg0s : arg1s;
3613 break;
3615 case UMAX:
3616 val = ((unsigned HOST_WIDE_INT) arg0
3617 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3618 break;
3620 case SS_PLUS:
3621 case US_PLUS:
3622 case SS_MINUS:
3623 case US_MINUS:
3624 case SS_MULT:
3625 case US_MULT:
3626 case SS_DIV:
3627 case US_DIV:
3628 case SS_ASHIFT:
3629 case US_ASHIFT:
3630 /* ??? There are simplifications that can be done. */
3631 return 0;
3633 default:
3634 gcc_unreachable ();
3637 return gen_int_mode (val, mode);
3640 return NULL_RTX;
3645 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3646 PLUS or MINUS.
 3648 Rather than test for specific cases, we do this by a brute-force method
3649 and do all possible simplifications until no more changes occur. Then
3650 we rebuild the operation. */
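/* For example, (minus (plus A B) (plus A C)) is first decomposed into the
   signed operands +A, +B, -A, -C; the +A/-A pair then cancels in the
   pairwise combination loop, and the remaining terms are rebuilt,
   roughly as (minus B C).  */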
3652 struct simplify_plus_minus_op_data
3654 rtx op;
3655 short neg;
3658 static bool
3659 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3661 int result;
3663 result = (commutative_operand_precedence (y)
3664 - commutative_operand_precedence (x));
3665 if (result)
3666 return result > 0;
3668 /* Group together equal REGs to do more simplification. */
3669 if (REG_P (x) && REG_P (y))
3670 return REGNO (x) > REGNO (y);
3671 else
3672 return false;
3675 static rtx
3676 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3677 rtx op1)
3679 struct simplify_plus_minus_op_data ops[8];
3680 rtx result, tem;
3681 int n_ops = 2, input_ops = 2;
3682 int changed, n_constants = 0, canonicalized = 0;
3683 int i, j;
3685 memset (ops, 0, sizeof ops);
3687 /* Set up the two operands and then expand them until nothing has been
3688 changed. If we run out of room in our array, give up; this should
3689 almost never happen. */
3691 ops[0].op = op0;
3692 ops[0].neg = 0;
3693 ops[1].op = op1;
3694 ops[1].neg = (code == MINUS);
3698 changed = 0;
3700 for (i = 0; i < n_ops; i++)
3702 rtx this_op = ops[i].op;
3703 int this_neg = ops[i].neg;
3704 enum rtx_code this_code = GET_CODE (this_op);
3706 switch (this_code)
3708 case PLUS:
3709 case MINUS:
3710 if (n_ops == 7)
3711 return NULL_RTX;
3713 ops[n_ops].op = XEXP (this_op, 1);
3714 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3715 n_ops++;
3717 ops[i].op = XEXP (this_op, 0);
3718 input_ops++;
3719 changed = 1;
3720 canonicalized |= this_neg;
3721 break;
3723 case NEG:
3724 ops[i].op = XEXP (this_op, 0);
3725 ops[i].neg = ! this_neg;
3726 changed = 1;
3727 canonicalized = 1;
3728 break;
3730 case CONST:
3731 if (n_ops < 7
3732 && GET_CODE (XEXP (this_op, 0)) == PLUS
3733 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3734 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3736 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3737 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3738 ops[n_ops].neg = this_neg;
3739 n_ops++;
3740 changed = 1;
3741 canonicalized = 1;
3743 break;
3745 case NOT:
3746 /* ~a -> (-a - 1) */
3747 if (n_ops != 7)
3749 ops[n_ops].op = constm1_rtx;
3750 ops[n_ops++].neg = this_neg;
3751 ops[i].op = XEXP (this_op, 0);
3752 ops[i].neg = !this_neg;
3753 changed = 1;
3754 canonicalized = 1;
3756 break;
3758 case CONST_INT:
3759 n_constants++;
3760 if (this_neg)
3762 ops[i].op = neg_const_int (mode, this_op);
3763 ops[i].neg = 0;
3764 changed = 1;
3765 canonicalized = 1;
3767 break;
3769 default:
3770 break;
3774 while (changed);
3776 if (n_constants > 1)
3777 canonicalized = 1;
3779 gcc_assert (n_ops >= 2);
3781 /* If we only have two operands, we can avoid the loops. */
3782 if (n_ops == 2)
3784 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3785 rtx lhs, rhs;
3787 /* Get the two operands. Be careful with the order, especially for
3788 the cases where code == MINUS. */
3789 if (ops[0].neg && ops[1].neg)
3791 lhs = gen_rtx_NEG (mode, ops[0].op);
3792 rhs = ops[1].op;
3794 else if (ops[0].neg)
3796 lhs = ops[1].op;
3797 rhs = ops[0].op;
3799 else
3801 lhs = ops[0].op;
3802 rhs = ops[1].op;
3805 return simplify_const_binary_operation (code, mode, lhs, rhs);
3808 /* Now simplify each pair of operands until nothing changes. */
3811 /* Insertion sort is good enough for an eight-element array. */
3812 for (i = 1; i < n_ops; i++)
3814 struct simplify_plus_minus_op_data save;
3815 j = i - 1;
3816 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3817 continue;
3819 canonicalized = 1;
3820 save = ops[i];
3822 ops[j + 1] = ops[j];
3823 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3824 ops[j + 1] = save;
3827 changed = 0;
3828 for (i = n_ops - 1; i > 0; i--)
3829 for (j = i - 1; j >= 0; j--)
3831 rtx lhs = ops[j].op, rhs = ops[i].op;
3832 int lneg = ops[j].neg, rneg = ops[i].neg;
3834 if (lhs != 0 && rhs != 0)
3836 enum rtx_code ncode = PLUS;
3838 if (lneg != rneg)
3840 ncode = MINUS;
3841 if (lneg)
3842 tem = lhs, lhs = rhs, rhs = tem;
3844 else if (swap_commutative_operands_p (lhs, rhs))
3845 tem = lhs, lhs = rhs, rhs = tem;
3847 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3848 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3850 rtx tem_lhs, tem_rhs;
3852 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3853 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3854 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3856 if (tem && !CONSTANT_P (tem))
3857 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3859 else
3860 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3862 /* Reject "simplifications" that just wrap the two
3863 arguments in a CONST. Failure to do so can result
3864 in infinite recursion with simplify_binary_operation
3865 when it calls us to simplify CONST operations. */
3866 if (tem
3867 && ! (GET_CODE (tem) == CONST
3868 && GET_CODE (XEXP (tem, 0)) == ncode
3869 && XEXP (XEXP (tem, 0), 0) == lhs
3870 && XEXP (XEXP (tem, 0), 1) == rhs))
3872 lneg &= rneg;
3873 if (GET_CODE (tem) == NEG)
3874 tem = XEXP (tem, 0), lneg = !lneg;
3875 if (CONST_INT_P (tem) && lneg)
3876 tem = neg_const_int (mode, tem), lneg = 0;
3878 ops[i].op = tem;
3879 ops[i].neg = lneg;
3880 ops[j].op = NULL_RTX;
3881 changed = 1;
3882 canonicalized = 1;
3887 /* If nothing changed, fail. */
3888 if (!canonicalized)
3889 return NULL_RTX;
3891 /* Pack all the operands to the lower-numbered entries. */
3892 for (i = 0, j = 0; j < n_ops; j++)
3893 if (ops[j].op)
3895 ops[i] = ops[j];
3896 i++;
3898 n_ops = i;
3900 while (changed);
3902 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3903 if (n_ops == 2
3904 && CONST_INT_P (ops[1].op)
3905 && CONSTANT_P (ops[0].op)
3906 && ops[0].neg)
3907 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3909 /* We suppressed creation of trivial CONST expressions in the
3910 combination loop to avoid recursion. Create one manually now.
3911 The combination loop should have ensured that there is exactly
3912 one CONST_INT, and the sort will have ensured that it is last
3913 in the array and that any other constant will be next-to-last. */
3915 if (n_ops > 1
3916 && CONST_INT_P (ops[n_ops - 1].op)
3917 && CONSTANT_P (ops[n_ops - 2].op))
3919 rtx value = ops[n_ops - 1].op;
3920 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3921 value = neg_const_int (mode, value);
3922 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3923 n_ops--;
3926 /* Put a non-negated operand first, if possible. */
3928 for (i = 0; i < n_ops && ops[i].neg; i++)
3929 continue;
3930 if (i == n_ops)
3931 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3932 else if (i != 0)
3934 tem = ops[0].op;
3935 ops[0] = ops[i];
3936 ops[i].op = tem;
3937 ops[i].neg = 1;
3940 /* Now make the result by performing the requested operations. */
3941 result = ops[0].op;
3942 for (i = 1; i < n_ops; i++)
3943 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3944 mode, result, ops[i].op);
3946 return result;
3949 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3950 static bool
3951 plus_minus_operand_p (const_rtx x)
3953 return GET_CODE (x) == PLUS
3954 || GET_CODE (x) == MINUS
3955 || (GET_CODE (x) == CONST
3956 && GET_CODE (XEXP (x, 0)) == PLUS
3957 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3958 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3961 /* Like simplify_binary_operation except used for relational operators.
 3962 MODE is the mode of the result. If MODE is VOIDmode, the two operands
 3963 must not both be VOIDmode.
 3965 CMP_MODE specifies the mode in which the comparison is done, so it is
3966 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3967 the operands or, if both are VOIDmode, the operands are compared in
3968 "infinite precision". */
3970 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3971 enum machine_mode cmp_mode, rtx op0, rtx op1)
3973 rtx tem, trueop0, trueop1;
3975 if (cmp_mode == VOIDmode)
3976 cmp_mode = GET_MODE (op0);
3977 if (cmp_mode == VOIDmode)
3978 cmp_mode = GET_MODE (op1);
3980 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3981 if (tem)
3983 if (SCALAR_FLOAT_MODE_P (mode))
3985 if (tem == const0_rtx)
3986 return CONST0_RTX (mode);
3987 #ifdef FLOAT_STORE_FLAG_VALUE
3989 REAL_VALUE_TYPE val;
3990 val = FLOAT_STORE_FLAG_VALUE (mode);
3991 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3993 #else
3994 return NULL_RTX;
3995 #endif
3997 if (VECTOR_MODE_P (mode))
3999 if (tem == const0_rtx)
4000 return CONST0_RTX (mode);
4001 #ifdef VECTOR_STORE_FLAG_VALUE
4003 int i, units;
4004 rtvec v;
4006 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4007 if (val == NULL_RTX)
4008 return NULL_RTX;
4009 if (val == const1_rtx)
4010 return CONST1_RTX (mode);
4012 units = GET_MODE_NUNITS (mode);
4013 v = rtvec_alloc (units);
4014 for (i = 0; i < units; i++)
4015 RTVEC_ELT (v, i) = val;
4016 return gen_rtx_raw_CONST_VECTOR (mode, v);
4018 #else
4019 return NULL_RTX;
4020 #endif
4023 return tem;
4026 /* For the following tests, ensure const0_rtx is op1. */
4027 if (swap_commutative_operands_p (op0, op1)
4028 || (op0 == const0_rtx && op1 != const0_rtx))
4029 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4031 /* If op0 is a compare, extract the comparison arguments from it. */
4032 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4033 return simplify_gen_relational (code, mode, VOIDmode,
4034 XEXP (op0, 0), XEXP (op0, 1));
4036 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4037 || CC0_P (op0))
4038 return NULL_RTX;
4040 trueop0 = avoid_constant_pool_reference (op0);
4041 trueop1 = avoid_constant_pool_reference (op1);
4042 return simplify_relational_operation_1 (code, mode, cmp_mode,
4043 trueop0, trueop1);
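/* Illustrative example (added for exposition, names hypothetical): when
   the first operand is a COMPARE tested against zero, the code above
   strips the COMPARE, so

     (lt:SI (compare:CC (reg:SI a) (reg:SI b)) (const_int 0))

   is rewritten via simplify_gen_relational (LT, SImode, VOIDmode, a, b);
   constant operands are folded first through
   simplify_const_relational_operation.  */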
4046 /* This part of simplify_relational_operation is only used when CMP_MODE
4047 is not in class MODE_CC (i.e. it is a real comparison).
4049 MODE is the mode of the result, while CMP_MODE specifies the mode in
4050 which the comparison is done, so it is the mode of the operands. */
4052 static rtx
4053 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4054 enum machine_mode cmp_mode, rtx op0, rtx op1)
4056 enum rtx_code op0code = GET_CODE (op0);
4058 if (op1 == const0_rtx && COMPARISON_P (op0))
4060 /* If op0 is a comparison, extract the comparison arguments
4061 from it. */
4062 if (code == NE)
4064 if (GET_MODE (op0) == mode)
4065 return simplify_rtx (op0);
4066 else
4067 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4068 XEXP (op0, 0), XEXP (op0, 1));
4070 else if (code == EQ)
4072 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4073 if (new_code != UNKNOWN)
4074 return simplify_gen_relational (new_code, mode, VOIDmode,
4075 XEXP (op0, 0), XEXP (op0, 1));
4079 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4080 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4081 if ((code == LTU || code == GEU)
4082 && GET_CODE (op0) == PLUS
4083 && CONST_INT_P (XEXP (op0, 1))
4084 && (rtx_equal_p (op1, XEXP (op0, 0))
4085 || rtx_equal_p (op1, XEXP (op0, 1))))
4087 rtx new_cmp
4088 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4089 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4090 cmp_mode, XEXP (op0, 0), new_cmp);
4093 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4094 if ((code == LTU || code == GEU)
4095 && GET_CODE (op0) == PLUS
4096 && rtx_equal_p (op1, XEXP (op0, 1))
4097 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4098 && !rtx_equal_p (op1, XEXP (op0, 0)))
4099 return simplify_gen_relational (code, mode, cmp_mode, op0,
4100 copy_rtx (XEXP (op0, 0)));
4102 if (op1 == const0_rtx)
4104 /* Canonicalize (GTU x 0) as (NE x 0). */
4105 if (code == GTU)
4106 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4107 /* Canonicalize (LEU x 0) as (EQ x 0). */
4108 if (code == LEU)
4109 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4111 else if (op1 == const1_rtx)
4113 switch (code)
4115 case GE:
4116 /* Canonicalize (GE x 1) as (GT x 0). */
4117 return simplify_gen_relational (GT, mode, cmp_mode,
4118 op0, const0_rtx);
4119 case GEU:
4120 /* Canonicalize (GEU x 1) as (NE x 0). */
4121 return simplify_gen_relational (NE, mode, cmp_mode,
4122 op0, const0_rtx);
4123 case LT:
4124 /* Canonicalize (LT x 1) as (LE x 0). */
4125 return simplify_gen_relational (LE, mode, cmp_mode,
4126 op0, const0_rtx);
4127 case LTU:
4128 /* Canonicalize (LTU x 1) as (EQ x 0). */
4129 return simplify_gen_relational (EQ, mode, cmp_mode,
4130 op0, const0_rtx);
4131 default:
4132 break;
4135 else if (op1 == constm1_rtx)
4137 /* Canonicalize (LE x -1) as (LT x 0). */
4138 if (code == LE)
4139 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4140 /* Canonicalize (GT x -1) as (GE x 0). */
4141 if (code == GT)
4142 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4145 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4146 if ((code == EQ || code == NE)
4147 && (op0code == PLUS || op0code == MINUS)
4148 && CONSTANT_P (op1)
4149 && CONSTANT_P (XEXP (op0, 1))
4150 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4152 rtx x = XEXP (op0, 0);
4153 rtx c = XEXP (op0, 1);
4155 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4156 cmp_mode, op1, c);
4157 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4160 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4161 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4162 if (code == NE
4163 && op1 == const0_rtx
4164 && GET_MODE_CLASS (mode) == MODE_INT
4165 && cmp_mode != VOIDmode
4166 /* ??? Work-around BImode bugs in the ia64 backend. */
4167 && mode != BImode
4168 && cmp_mode != BImode
4169 && nonzero_bits (op0, cmp_mode) == 1
4170 && STORE_FLAG_VALUE == 1)
4171 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4172 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4173 : lowpart_subreg (mode, op0, cmp_mode);
4175 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4176 if ((code == EQ || code == NE)
4177 && op1 == const0_rtx
4178 && op0code == XOR)
4179 return simplify_gen_relational (code, mode, cmp_mode,
4180 XEXP (op0, 0), XEXP (op0, 1));
4182 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4183 if ((code == EQ || code == NE)
4184 && op0code == XOR
4185 && rtx_equal_p (XEXP (op0, 0), op1)
4186 && !side_effects_p (XEXP (op0, 0)))
4187 return simplify_gen_relational (code, mode, cmp_mode,
4188 XEXP (op0, 1), const0_rtx);
4190 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4191 if ((code == EQ || code == NE)
4192 && op0code == XOR
4193 && rtx_equal_p (XEXP (op0, 1), op1)
4194 && !side_effects_p (XEXP (op0, 1)))
4195 return simplify_gen_relational (code, mode, cmp_mode,
4196 XEXP (op0, 0), const0_rtx);
4198 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4199 if ((code == EQ || code == NE)
4200 && op0code == XOR
4201 && (CONST_INT_P (op1)
4202 || GET_CODE (op1) == CONST_DOUBLE)
4203 && (CONST_INT_P (XEXP (op0, 1))
4204 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4205 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4206 simplify_gen_binary (XOR, cmp_mode,
4207 XEXP (op0, 1), op1));
4209 if (op0code == POPCOUNT && op1 == const0_rtx)
4210 switch (code)
4212 case EQ:
4213 case LE:
4214 case LEU:
4215 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4216 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4217 XEXP (op0, 0), const0_rtx);
4219 case NE:
4220 case GT:
4221 case GTU:
4222 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4223 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4224 XEXP (op0, 0), const0_rtx);
4226 default:
4227 break;
4230 return NULL_RTX;
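/* Worked example (added for exposition, "x" hypothetical): the
   (LTU/GEU (PLUS a C) C) rule above rewrites the common overflow-check
   idiom

     (ltu:SI (plus:SI (reg:SI x) (const_int 4)) (const_int 4))

   as

     (geu:SI (reg:SI x) (const_int -4)).  */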
4233 enum
4235 CMP_EQ = 1,
4236 CMP_LT = 2,
4237 CMP_GT = 4,
4238 CMP_LTU = 8,
4239 CMP_GTU = 16
4243 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4244 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4245 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4246 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4247 For floating-point comparisons, assume that the operands were ordered. */
4249 static rtx
4250 comparison_result (enum rtx_code code, int known_results)
4252 switch (code)
4254 case EQ:
4255 case UNEQ:
4256 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4257 case NE:
4258 case LTGT:
4259 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4261 case LT:
4262 case UNLT:
4263 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4264 case GE:
4265 case UNGE:
4266 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4268 case GT:
4269 case UNGT:
4270 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4271 case LE:
4272 case UNLE:
4273 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4275 case LTU:
4276 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4277 case GEU:
4278 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4280 case GTU:
4281 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4282 case LEU:
4283 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4285 case ORDERED:
4286 return const_true_rtx;
4287 case UNORDERED:
4288 return const0_rtx;
4289 default:
4290 gcc_unreachable ();
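/* Worked example (added for exposition): for the constant operands 1 and 2
   the caller computes KNOWN_RESULTS = CMP_LT | CMP_LTU, since 1 is less
   than 2 both signed and unsigned and the operands are not equal.  Then

     comparison_result (LE,  CMP_LT | CMP_LTU) == const_true_rtx
     comparison_result (GTU, CMP_LT | CMP_LTU) == const0_rtx
     comparison_result (NE,  CMP_LT | CMP_LTU) == const_true_rtx.  */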
4294 /* Check if the given comparison (done in the given MODE) is actually a
4295 tautology or a contradiction.
4296 If no simplification is possible, this function returns zero.
4297 Otherwise, it returns either const_true_rtx or const0_rtx. */
4300 simplify_const_relational_operation (enum rtx_code code,
4301 enum machine_mode mode,
4302 rtx op0, rtx op1)
4304 rtx tem;
4305 rtx trueop0;
4306 rtx trueop1;
4308 gcc_assert (mode != VOIDmode
4309 || (GET_MODE (op0) == VOIDmode
4310 && GET_MODE (op1) == VOIDmode));
4312 /* If op0 is a compare, extract the comparison arguments from it. */
4313 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4315 op1 = XEXP (op0, 1);
4316 op0 = XEXP (op0, 0);
4318 if (GET_MODE (op0) != VOIDmode)
4319 mode = GET_MODE (op0);
4320 else if (GET_MODE (op1) != VOIDmode)
4321 mode = GET_MODE (op1);
4322 else
4323 return 0;
4326 /* We can't simplify MODE_CC values since we don't know what the
4327 actual comparison is. */
4328 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4329 return 0;
4331 /* Make sure the constant is second. */
4332 if (swap_commutative_operands_p (op0, op1))
4334 tem = op0, op0 = op1, op1 = tem;
4335 code = swap_condition (code);
4338 trueop0 = avoid_constant_pool_reference (op0);
4339 trueop1 = avoid_constant_pool_reference (op1);
4341 /* For integer comparisons of A and B maybe we can simplify A - B and can
4342 then simplify a comparison of that with zero. If A and B are both either
4343 a register or a CONST_INT, this can't help; testing for these cases will
4344 prevent infinite recursion here and speed things up.
4346 We can only do this for EQ and NE comparisons as otherwise we may
4347 lose or introduce overflow which we cannot disregard as undefined, since
4348 we do not know the signedness of the operation on either the left or
4349 the right hand side of the comparison. */
4351 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4352 && (code == EQ || code == NE)
4353 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4354 && (REG_P (op1) || CONST_INT_P (trueop1)))
4355 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4356 /* We cannot do this if tem is a nonzero address. */
4357 && ! nonzero_address_p (tem))
4358 return simplify_const_relational_operation (signed_condition (code),
4359 mode, tem, const0_rtx);
4361 if (! HONOR_NANS (mode) && code == ORDERED)
4362 return const_true_rtx;
4364 if (! HONOR_NANS (mode) && code == UNORDERED)
4365 return const0_rtx;
4367 /* For modes without NaNs, if the two operands are equal, we know the
4368 result except if they have side-effects. Even with NaNs we know
4369 the result of unordered comparisons and, if signaling NaNs are
4370 irrelevant, also the result of LT/GT/LTGT. */
4371 if ((! HONOR_NANS (GET_MODE (trueop0))
4372 || code == UNEQ || code == UNLE || code == UNGE
4373 || ((code == LT || code == GT || code == LTGT)
4374 && ! HONOR_SNANS (GET_MODE (trueop0))))
4375 && rtx_equal_p (trueop0, trueop1)
4376 && ! side_effects_p (trueop0))
4377 return comparison_result (code, CMP_EQ);
4379 /* If the operands are floating-point constants, see if we can fold
4380 the result. */
4381 if (GET_CODE (trueop0) == CONST_DOUBLE
4382 && GET_CODE (trueop1) == CONST_DOUBLE
4383 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4385 REAL_VALUE_TYPE d0, d1;
4387 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4388 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4390 /* Comparisons are unordered iff at least one of the values is NaN. */
4391 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4392 switch (code)
4394 case UNEQ:
4395 case UNLT:
4396 case UNGT:
4397 case UNLE:
4398 case UNGE:
4399 case NE:
4400 case UNORDERED:
4401 return const_true_rtx;
4402 case EQ:
4403 case LT:
4404 case GT:
4405 case LE:
4406 case GE:
4407 case LTGT:
4408 case ORDERED:
4409 return const0_rtx;
4410 default:
4411 return 0;
4414 return comparison_result (code,
4415 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4416 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4419 /* Otherwise, see if the operands are both integers. */
4420 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4421 && (GET_CODE (trueop0) == CONST_DOUBLE
4422 || CONST_INT_P (trueop0))
4423 && (GET_CODE (trueop1) == CONST_DOUBLE
4424 || CONST_INT_P (trueop1)))
4426 int width = GET_MODE_BITSIZE (mode);
4427 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4428 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4430 /* Get the two words comprising each integer constant. */
4431 if (GET_CODE (trueop0) == CONST_DOUBLE)
4433 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4434 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4436 else
4438 l0u = l0s = INTVAL (trueop0);
4439 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4442 if (GET_CODE (trueop1) == CONST_DOUBLE)
4444 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4445 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4447 else
4449 l1u = l1s = INTVAL (trueop1);
4450 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4453 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4454 we have to sign or zero-extend the values. */
4455 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4457 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4458 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4460 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4461 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4463 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4464 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4466 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4467 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4469 if (h0u == h1u && l0u == l1u)
4470 return comparison_result (code, CMP_EQ);
4471 else
4473 int cr;
4474 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4475 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4476 return comparison_result (code, cr);
4480 /* Optimize comparisons with upper and lower bounds. */
4481 if (SCALAR_INT_MODE_P (mode)
4482 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4483 && CONST_INT_P (trueop1))
4485 int sign;
4486 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4487 HOST_WIDE_INT val = INTVAL (trueop1);
4488 HOST_WIDE_INT mmin, mmax;
4490 if (code == GEU
4491 || code == LEU
4492 || code == GTU
4493 || code == LTU)
4494 sign = 0;
4495 else
4496 sign = 1;
4498 /* Get a reduced range if the sign bit is zero. */
4499 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4501 mmin = 0;
4502 mmax = nonzero;
4504 else
4506 rtx mmin_rtx, mmax_rtx;
4507 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4509 mmin = INTVAL (mmin_rtx);
4510 mmax = INTVAL (mmax_rtx);
4511 if (sign)
4513 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4515 mmin >>= (sign_copies - 1);
4516 mmax >>= (sign_copies - 1);
4520 switch (code)
4522 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4523 case GEU:
4524 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4525 return const_true_rtx;
4526 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4527 return const0_rtx;
4528 break;
4529 case GE:
4530 if (val <= mmin)
4531 return const_true_rtx;
4532 if (val > mmax)
4533 return const0_rtx;
4534 break;
4536 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4537 case LEU:
4538 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4539 return const_true_rtx;
4540 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4541 return const0_rtx;
4542 break;
4543 case LE:
4544 if (val >= mmax)
4545 return const_true_rtx;
4546 if (val < mmin)
4547 return const0_rtx;
4548 break;
4550 case EQ:
4551 /* x == y is always false for y out of range. */
4552 if (val < mmin || val > mmax)
4553 return const0_rtx;
4554 break;
4556 /* x > y is always false for y >= mmax, always true for y < mmin. */
4557 case GTU:
4558 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4559 return const0_rtx;
4560 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4561 return const_true_rtx;
4562 break;
4563 case GT:
4564 if (val >= mmax)
4565 return const0_rtx;
4566 if (val < mmin)
4567 return const_true_rtx;
4568 break;
4570 /* x < y is always false for y <= mmin, always true for y > mmax. */
4571 case LTU:
4572 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4573 return const0_rtx;
4574 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4575 return const_true_rtx;
4576 break;
4577 case LT:
4578 if (val <= mmin)
4579 return const0_rtx;
4580 if (val > mmax)
4581 return const_true_rtx;
4582 break;
4584 case NE:
4585 /* x != y is always true for y out of range. */
4586 if (val < mmin || val > mmax)
4587 return const_true_rtx;
4588 break;
4590 default:
4591 break;
4595 /* Optimize integer comparisons with zero. */
4596 if (trueop1 == const0_rtx)
4598 /* Some addresses are known to be nonzero. We don't know
4599 their sign, but equality comparisons are known. */
4600 if (nonzero_address_p (trueop0))
4602 if (code == EQ || code == LEU)
4603 return const0_rtx;
4604 if (code == NE || code == GTU)
4605 return const_true_rtx;
4608 /* See if the first operand is an IOR with a constant. If so, we
4609 may be able to determine the result of this comparison. */
4610 if (GET_CODE (op0) == IOR)
4612 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4613 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4615 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4616 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4617 && (UINTVAL (inner_const)
4618 & ((unsigned HOST_WIDE_INT) 1
4619 << sign_bitnum)));
4621 switch (code)
4623 case EQ:
4624 case LEU:
4625 return const0_rtx;
4626 case NE:
4627 case GTU:
4628 return const_true_rtx;
4629 case LT:
4630 case LE:
4631 if (has_sign)
4632 return const_true_rtx;
4633 break;
4634 case GT:
4635 case GE:
4636 if (has_sign)
4637 return const0_rtx;
4638 break;
4639 default:
4640 break;
4646 /* Optimize comparison of ABS with zero. */
4647 if (trueop1 == CONST0_RTX (mode)
4648 && (GET_CODE (trueop0) == ABS
4649 || (GET_CODE (trueop0) == FLOAT_EXTEND
4650 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4652 switch (code)
4654 case LT:
4655 /* Optimize abs(x) < 0.0. */
4656 if (!HONOR_SNANS (mode)
4657 && (!INTEGRAL_MODE_P (mode)
4658 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4660 if (INTEGRAL_MODE_P (mode)
4661 && (issue_strict_overflow_warning
4662 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4663 warning (OPT_Wstrict_overflow,
4664 ("assuming signed overflow does not occur when "
4665 "assuming abs (x) < 0 is false"));
4666 return const0_rtx;
4668 break;
4670 case GE:
4671 /* Optimize abs(x) >= 0.0. */
4672 if (!HONOR_NANS (mode)
4673 && (!INTEGRAL_MODE_P (mode)
4674 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4676 if (INTEGRAL_MODE_P (mode)
4677 && (issue_strict_overflow_warning
4678 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4679 warning (OPT_Wstrict_overflow,
4680 ("assuming signed overflow does not occur when "
4681 "assuming abs (x) >= 0 is true"));
4682 return const_true_rtx;
4684 break;
4686 case UNGE:
4687 /* Optimize ! (abs(x) < 0.0). */
4688 return const_true_rtx;
4690 default:
4691 break;
4695 return 0;
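/* Minimal usage sketch (added for exposition; not compiled): constant
   operands fold to const_true_rtx or const0_rtx, while an unknown
   comparison yields NULL_RTX.  The register number below is an arbitrary,
   hypothetical pseudo.  */
#if 0
  rtx t = simplify_const_relational_operation (GT, SImode,
                                               GEN_INT (7), GEN_INT (3));
  /* t == const_true_rtx: 7 > 3 holds both signed and unsigned.  */
  rtx u = simplify_const_relational_operation (EQ, SImode,
                                               gen_rtx_REG (SImode, 100),
                                               const0_rtx);
  /* u is normally NULL_RTX: nothing is known about the pseudo here.  */
#endif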
4698 /* Simplify CODE, an operation with result mode MODE and three operands,
4699 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4700 a constant.  Return 0 if no simplification is possible. */
4703 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4704 enum machine_mode op0_mode, rtx op0, rtx op1,
4705 rtx op2)
4707 unsigned int width = GET_MODE_BITSIZE (mode);
4708 bool any_change = false;
4709 rtx tem;
4711 /* VOIDmode means "infinite" precision. */
4712 if (width == 0)
4713 width = HOST_BITS_PER_WIDE_INT;
4715 switch (code)
4717 case FMA:
4718 /* Simplify negations around the multiplication. */
4719 /* -a * -b + c => a * b + c. */
4720 if (GET_CODE (op0) == NEG)
4722 tem = simplify_unary_operation (NEG, mode, op1, mode);
4723 if (tem)
4724 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4726 else if (GET_CODE (op1) == NEG)
4728 tem = simplify_unary_operation (NEG, mode, op0, mode);
4729 if (tem)
4730 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4733 /* Canonicalize the two multiplication operands. */
4734 /* a * -b + c => -b * a + c. */
4735 if (swap_commutative_operands_p (op0, op1))
4736 tem = op0, op0 = op1, op1 = tem, any_change = true;
4738 if (any_change)
4739 return gen_rtx_FMA (mode, op0, op1, op2);
4740 return NULL_RTX;
4742 case SIGN_EXTRACT:
4743 case ZERO_EXTRACT:
4744 if (CONST_INT_P (op0)
4745 && CONST_INT_P (op1)
4746 && CONST_INT_P (op2)
4747 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4748 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4750 /* Extracting a bit-field from a constant */
4751 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4753 if (BITS_BIG_ENDIAN)
4754 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4755 else
4756 val >>= INTVAL (op2);
4758 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4760 /* First zero-extend. */
4761 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4762 /* If desired, propagate sign bit. */
4763 if (code == SIGN_EXTRACT
4764 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4765 != 0)
4766 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4769 /* Clear the bits that don't belong in our mode,
4770 unless they and our sign bit are all one.
4771 So we get either a reasonable negative value or a reasonable
4772 unsigned value for this mode. */
4773 if (width < HOST_BITS_PER_WIDE_INT
4774 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4775 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4776 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4778 return gen_int_mode (val, mode);
4780 break;
4782 case IF_THEN_ELSE:
4783 if (CONST_INT_P (op0))
4784 return op0 != const0_rtx ? op1 : op2;
4786 /* Convert c ? a : a into "a". */
4787 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4788 return op1;
4790 /* Convert a != b ? a : b into "a". */
4791 if (GET_CODE (op0) == NE
4792 && ! side_effects_p (op0)
4793 && ! HONOR_NANS (mode)
4794 && ! HONOR_SIGNED_ZEROS (mode)
4795 && ((rtx_equal_p (XEXP (op0, 0), op1)
4796 && rtx_equal_p (XEXP (op0, 1), op2))
4797 || (rtx_equal_p (XEXP (op0, 0), op2)
4798 && rtx_equal_p (XEXP (op0, 1), op1))))
4799 return op1;
4801 /* Convert a == b ? a : b into "b". */
4802 if (GET_CODE (op0) == EQ
4803 && ! side_effects_p (op0)
4804 && ! HONOR_NANS (mode)
4805 && ! HONOR_SIGNED_ZEROS (mode)
4806 && ((rtx_equal_p (XEXP (op0, 0), op1)
4807 && rtx_equal_p (XEXP (op0, 1), op2))
4808 || (rtx_equal_p (XEXP (op0, 0), op2)
4809 && rtx_equal_p (XEXP (op0, 1), op1))))
4810 return op2;
4812 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4814 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4815 ? GET_MODE (XEXP (op0, 1))
4816 : GET_MODE (XEXP (op0, 0)));
4817 rtx temp;
4819 /* Look for happy constants in op1 and op2. */
4820 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4822 HOST_WIDE_INT t = INTVAL (op1);
4823 HOST_WIDE_INT f = INTVAL (op2);
4825 if (t == STORE_FLAG_VALUE && f == 0)
4826 code = GET_CODE (op0);
4827 else if (t == 0 && f == STORE_FLAG_VALUE)
4829 enum rtx_code tmp;
4830 tmp = reversed_comparison_code (op0, NULL_RTX);
4831 if (tmp == UNKNOWN)
4832 break;
4833 code = tmp;
4835 else
4836 break;
4838 return simplify_gen_relational (code, mode, cmp_mode,
4839 XEXP (op0, 0), XEXP (op0, 1));
4842 if (cmp_mode == VOIDmode)
4843 cmp_mode = op0_mode;
4844 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4845 cmp_mode, XEXP (op0, 0),
4846 XEXP (op0, 1));
4848 /* See if any simplifications were possible. */
4849 if (temp)
4851 if (CONST_INT_P (temp))
4852 return temp == const0_rtx ? op2 : op1;
4853 else if (temp)
4854 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4857 break;
4859 case VEC_MERGE:
4860 gcc_assert (GET_MODE (op0) == mode);
4861 gcc_assert (GET_MODE (op1) == mode);
4862 gcc_assert (VECTOR_MODE_P (mode));
4863 op2 = avoid_constant_pool_reference (op2);
4864 if (CONST_INT_P (op2))
4866 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4867 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4868 int mask = (1 << n_elts) - 1;
4870 if (!(INTVAL (op2) & mask))
4871 return op1;
4872 if ((INTVAL (op2) & mask) == mask)
4873 return op0;
4875 op0 = avoid_constant_pool_reference (op0);
4876 op1 = avoid_constant_pool_reference (op1);
4877 if (GET_CODE (op0) == CONST_VECTOR
4878 && GET_CODE (op1) == CONST_VECTOR)
4880 rtvec v = rtvec_alloc (n_elts);
4881 unsigned int i;
4883 for (i = 0; i < n_elts; i++)
4884 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4885 ? CONST_VECTOR_ELT (op0, i)
4886 : CONST_VECTOR_ELT (op1, i));
4887 return gen_rtx_CONST_VECTOR (mode, v);
4890 break;
4892 default:
4893 gcc_unreachable ();
4896 return 0;
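/* Worked example (added for exposition): assuming BITS_BIG_ENDIAN == 0,
   extracting a 4-bit field at bit position 1 from the constant 0x5a gives

     (zero_extract:SI (const_int 0x5a) (const_int 4) (const_int 1))
       -> (const_int 13)          i.e. (0x5a >> 1) & 0xf

   and a constant IF_THEN_ELSE condition selects one arm directly:
   (if_then_else (const_int 1) A B) -> A, where A and B are arbitrary
   placeholder operands.  */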
4899 /* Evaluate a SUBREG of a CONST_INT, CONST_DOUBLE, CONST_FIXED or
4900 CONST_VECTOR, returning another CONST_INT, CONST_DOUBLE, CONST_FIXED
4901 or CONST_VECTOR.
4903 Works by unpacking OP into a collection of 8-bit values
4904 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4905 and then repacking them again for OUTERMODE. */
4907 static rtx
4908 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4909 enum machine_mode innermode, unsigned int byte)
4911 /* We support up to 512-bit values (for V8DFmode). */
4912 enum {
4913 max_bitsize = 512,
4914 value_bit = 8,
4915 value_mask = (1 << value_bit) - 1
4917 unsigned char value[max_bitsize / value_bit];
4918 int value_start;
4919 int i;
4920 int elem;
4922 int num_elem;
4923 rtx * elems;
4924 int elem_bitsize;
4925 rtx result_s;
4926 rtvec result_v = NULL;
4927 enum mode_class outer_class;
4928 enum machine_mode outer_submode;
4930 /* Some ports misuse CCmode. */
4931 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4932 return op;
4934 /* We have no way to represent a complex constant at the rtl level. */
4935 if (COMPLEX_MODE_P (outermode))
4936 return NULL_RTX;
4938 /* Unpack the value. */
4940 if (GET_CODE (op) == CONST_VECTOR)
4942 num_elem = CONST_VECTOR_NUNITS (op);
4943 elems = &CONST_VECTOR_ELT (op, 0);
4944 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4946 else
4948 num_elem = 1;
4949 elems = &op;
4950 elem_bitsize = max_bitsize;
4952 /* If this asserts, it is too complicated; reducing value_bit may help. */
4953 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4954 /* I don't know how to handle endianness of sub-units. */
4955 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4957 for (elem = 0; elem < num_elem; elem++)
4959 unsigned char * vp;
4960 rtx el = elems[elem];
4962 /* Vectors are kept in target memory order. (This is probably
4963 a mistake.) */
4965 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4966 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4967 / BITS_PER_UNIT);
4968 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4969 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4970 unsigned bytele = (subword_byte % UNITS_PER_WORD
4971 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4972 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4975 switch (GET_CODE (el))
4977 case CONST_INT:
4978 for (i = 0;
4979 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4980 i += value_bit)
4981 *vp++ = INTVAL (el) >> i;
4982 /* CONST_INTs are always logically sign-extended. */
4983 for (; i < elem_bitsize; i += value_bit)
4984 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4985 break;
4987 case CONST_DOUBLE:
4988 if (GET_MODE (el) == VOIDmode)
4990 /* If this triggers, someone should have generated a
4991 CONST_INT instead. */
4992 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4994 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4995 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4996 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4998 *vp++
4999 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5000 i += value_bit;
5002 /* It shouldn't matter what's done here, so fill it with
5003 zero. */
5004 for (; i < elem_bitsize; i += value_bit)
5005 *vp++ = 0;
5007 else
5009 long tmp[max_bitsize / 32];
5010 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5012 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5013 gcc_assert (bitsize <= elem_bitsize);
5014 gcc_assert (bitsize % value_bit == 0);
5016 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5017 GET_MODE (el));
5019 /* real_to_target produces its result in words affected by
5020 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5021 and use WORDS_BIG_ENDIAN instead; see the documentation
5022 of SUBREG in rtl.texi. */
5023 for (i = 0; i < bitsize; i += value_bit)
5025 int ibase;
5026 if (WORDS_BIG_ENDIAN)
5027 ibase = bitsize - 1 - i;
5028 else
5029 ibase = i;
5030 *vp++ = tmp[ibase / 32] >> i % 32;
5033 /* It shouldn't matter what's done here, so fill it with
5034 zero. */
5035 for (; i < elem_bitsize; i += value_bit)
5036 *vp++ = 0;
5038 break;
5040 case CONST_FIXED:
5041 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5043 for (i = 0; i < elem_bitsize; i += value_bit)
5044 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5046 else
5048 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5049 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5050 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5051 i += value_bit)
5052 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5053 >> (i - HOST_BITS_PER_WIDE_INT);
5054 for (; i < elem_bitsize; i += value_bit)
5055 *vp++ = 0;
5057 break;
5059 default:
5060 gcc_unreachable ();
5064 /* Now, pick the right byte to start with. */
5065 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5066 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5067 will already have offset 0. */
5068 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5070 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5071 - byte);
5072 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5073 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5074 byte = (subword_byte % UNITS_PER_WORD
5075 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5078 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5079 so if it's become negative it will instead be very large.) */
5080 gcc_assert (byte < GET_MODE_SIZE (innermode));
5082 /* Convert from bytes to chunks of size value_bit. */
5083 value_start = byte * (BITS_PER_UNIT / value_bit);
5085 /* Re-pack the value. */
5087 if (VECTOR_MODE_P (outermode))
5089 num_elem = GET_MODE_NUNITS (outermode);
5090 result_v = rtvec_alloc (num_elem);
5091 elems = &RTVEC_ELT (result_v, 0);
5092 outer_submode = GET_MODE_INNER (outermode);
5094 else
5096 num_elem = 1;
5097 elems = &result_s;
5098 outer_submode = outermode;
5101 outer_class = GET_MODE_CLASS (outer_submode);
5102 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5104 gcc_assert (elem_bitsize % value_bit == 0);
5105 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5107 for (elem = 0; elem < num_elem; elem++)
5109 unsigned char *vp;
5111 /* Vectors are stored in target memory order. (This is probably
5112 a mistake.) */
5114 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5115 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5116 / BITS_PER_UNIT);
5117 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5118 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5119 unsigned bytele = (subword_byte % UNITS_PER_WORD
5120 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5121 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5124 switch (outer_class)
5126 case MODE_INT:
5127 case MODE_PARTIAL_INT:
5129 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5131 for (i = 0;
5132 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5133 i += value_bit)
5134 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5135 for (; i < elem_bitsize; i += value_bit)
5136 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5137 << (i - HOST_BITS_PER_WIDE_INT);
5139 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5140 know why. */
5141 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5142 elems[elem] = gen_int_mode (lo, outer_submode);
5143 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5144 elems[elem] = immed_double_const (lo, hi, outer_submode);
5145 else
5146 return NULL_RTX;
5148 break;
5150 case MODE_FLOAT:
5151 case MODE_DECIMAL_FLOAT:
5153 REAL_VALUE_TYPE r;
5154 long tmp[max_bitsize / 32];
5156 /* real_from_target wants its input in words affected by
5157 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5158 and use WORDS_BIG_ENDIAN instead; see the documentation
5159 of SUBREG in rtl.texi. */
5160 for (i = 0; i < max_bitsize / 32; i++)
5161 tmp[i] = 0;
5162 for (i = 0; i < elem_bitsize; i += value_bit)
5164 int ibase;
5165 if (WORDS_BIG_ENDIAN)
5166 ibase = elem_bitsize - 1 - i;
5167 else
5168 ibase = i;
5169 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5172 real_from_target (&r, tmp, outer_submode);
5173 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5175 break;
5177 case MODE_FRACT:
5178 case MODE_UFRACT:
5179 case MODE_ACCUM:
5180 case MODE_UACCUM:
5182 FIXED_VALUE_TYPE f;
5183 f.data.low = 0;
5184 f.data.high = 0;
5185 f.mode = outer_submode;
5187 for (i = 0;
5188 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5189 i += value_bit)
5190 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5191 for (; i < elem_bitsize; i += value_bit)
5192 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5193 << (i - HOST_BITS_PER_WIDE_INT));
5195 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5197 break;
5199 default:
5200 gcc_unreachable ();
5203 if (VECTOR_MODE_P (outermode))
5204 return gen_rtx_CONST_VECTOR (outermode, result_v);
5205 else
5206 return result_s;
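/* Worked example (added for exposition): on a little-endian target
   (BYTES_BIG_ENDIAN == 0 and WORDS_BIG_ENDIAN == 0), the byte-array
   round trip above evaluates

     (subreg:QI (const_int 0x12345678) 0)      with inner mode SImode

   to (const_int 0x78), the least significant byte; byte offset 3 would
   select (const_int 0x12) instead.  */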
5209 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5210 Return 0 if no simplifications are possible. */
5212 simplify_subreg (enum machine_mode outermode, rtx op,
5213 enum machine_mode innermode, unsigned int byte)
5215 /* Little bit of sanity checking. */
5216 gcc_assert (innermode != VOIDmode);
5217 gcc_assert (outermode != VOIDmode);
5218 gcc_assert (innermode != BLKmode);
5219 gcc_assert (outermode != BLKmode);
5221 gcc_assert (GET_MODE (op) == innermode
5222 || GET_MODE (op) == VOIDmode);
5224 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5225 gcc_assert (byte < GET_MODE_SIZE (innermode));
5227 if (outermode == innermode && !byte)
5228 return op;
5230 if (CONST_INT_P (op)
5231 || GET_CODE (op) == CONST_DOUBLE
5232 || GET_CODE (op) == CONST_FIXED
5233 || GET_CODE (op) == CONST_VECTOR)
5234 return simplify_immed_subreg (outermode, op, innermode, byte);
5236 /* Changing mode twice with SUBREG => just change it once,
5237 or not at all if changing back to op's starting mode. */
5238 if (GET_CODE (op) == SUBREG)
5240 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5241 int final_offset = byte + SUBREG_BYTE (op);
5242 rtx newx;
5244 if (outermode == innermostmode
5245 && byte == 0 && SUBREG_BYTE (op) == 0)
5246 return SUBREG_REG (op);
5248 /* The SUBREG_BYTE represents the offset, as if the value were stored
5249 in memory.  An irritating exception is a paradoxical subreg, where
5250 we define SUBREG_BYTE to be 0.  On big-endian machines, this
5251 value should be negative.  For a moment, undo this exception. */
5252 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5254 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5255 if (WORDS_BIG_ENDIAN)
5256 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5257 if (BYTES_BIG_ENDIAN)
5258 final_offset += difference % UNITS_PER_WORD;
5260 if (SUBREG_BYTE (op) == 0
5261 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5263 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5264 if (WORDS_BIG_ENDIAN)
5265 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5266 if (BYTES_BIG_ENDIAN)
5267 final_offset += difference % UNITS_PER_WORD;
5270 /* See whether resulting subreg will be paradoxical. */
5271 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5273 /* In nonparadoxical subregs we can't handle negative offsets. */
5274 if (final_offset < 0)
5275 return NULL_RTX;
5276 /* Bail out in case resulting subreg would be incorrect. */
5277 if (final_offset % GET_MODE_SIZE (outermode)
5278 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5279 return NULL_RTX;
5281 else
5283 int offset = 0;
5284 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5286 /* In a paradoxical subreg, see if we are still looking at the lower part.
5287 If so, our SUBREG_BYTE will be 0. */
5288 if (WORDS_BIG_ENDIAN)
5289 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5290 if (BYTES_BIG_ENDIAN)
5291 offset += difference % UNITS_PER_WORD;
5292 if (offset == final_offset)
5293 final_offset = 0;
5294 else
5295 return NULL_RTX;
5298 /* Recurse for further possible simplifications. */
5299 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5300 final_offset);
5301 if (newx)
5302 return newx;
5303 if (validate_subreg (outermode, innermostmode,
5304 SUBREG_REG (op), final_offset))
5306 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5307 if (SUBREG_PROMOTED_VAR_P (op)
5308 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5309 && GET_MODE_CLASS (outermode) == MODE_INT
5310 && IN_RANGE (GET_MODE_SIZE (outermode),
5311 GET_MODE_SIZE (innermode),
5312 GET_MODE_SIZE (innermostmode))
5313 && subreg_lowpart_p (newx))
5315 SUBREG_PROMOTED_VAR_P (newx) = 1;
5316 SUBREG_PROMOTED_UNSIGNED_SET
5317 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5319 return newx;
5321 return NULL_RTX;
5324 /* Merge implicit and explicit truncations. */
5326 if (GET_CODE (op) == TRUNCATE
5327 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5328 && subreg_lowpart_offset (outermode, innermode) == byte)
5329 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5330 GET_MODE (XEXP (op, 0)));
5332 /* SUBREG of a hard register => just change the register number
5333 and/or mode. If the hard register is not valid in that mode,
5334 suppress this simplification. If the hard register is the stack,
5335 frame, or argument pointer, leave this as a SUBREG. */
5337 if (REG_P (op) && HARD_REGISTER_P (op))
5339 unsigned int regno, final_regno;
5341 regno = REGNO (op);
5342 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5343 if (HARD_REGISTER_NUM_P (final_regno))
5345 rtx x;
5346 int final_offset = byte;
5348 /* Adjust offset for paradoxical subregs. */
5349 if (byte == 0
5350 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5352 int difference = (GET_MODE_SIZE (innermode)
5353 - GET_MODE_SIZE (outermode));
5354 if (WORDS_BIG_ENDIAN)
5355 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5356 if (BYTES_BIG_ENDIAN)
5357 final_offset += difference % UNITS_PER_WORD;
5360 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5362 /* Propagate the original regno.  We don't have any way to specify
5363 the offset inside the original regno, so do so only for the lowpart.
5364 The information is used only by alias analysis, which cannot
5365 grok partial registers anyway. */
5367 if (subreg_lowpart_offset (outermode, innermode) == byte)
5368 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5369 return x;
5373 /* If we have a SUBREG of a register that we are replacing and we are
5374 replacing it with a MEM, make a new MEM and try replacing the
5375 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5376 or if we would be widening it. */
5378 if (MEM_P (op)
5379 && ! mode_dependent_address_p (XEXP (op, 0))
5380 /* Allow splitting of volatile memory references in case we don't
5381 have an instruction to move the whole thing. */
5382 && (! MEM_VOLATILE_P (op)
5383 || ! have_insn_for (SET, innermode))
5384 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5385 return adjust_address_nv (op, outermode, byte);
5387 /* Handle complex values represented as CONCAT
5388 of real and imaginary part. */
5389 if (GET_CODE (op) == CONCAT)
5391 unsigned int part_size, final_offset;
5392 rtx part, res;
5394 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5395 if (byte < part_size)
5397 part = XEXP (op, 0);
5398 final_offset = byte;
5400 else
5402 part = XEXP (op, 1);
5403 final_offset = byte - part_size;
5406 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5407 return NULL_RTX;
5409 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5410 if (res)
5411 return res;
5412 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5413 return gen_rtx_SUBREG (outermode, part, final_offset);
5414 return NULL_RTX;
5417 /* Optimize SUBREG truncations of zero and sign extended values. */
5418 if ((GET_CODE (op) == ZERO_EXTEND
5419 || GET_CODE (op) == SIGN_EXTEND)
5420 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5422 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5424 /* If we're requesting the lowpart of a zero or sign extension,
5425 there are three possibilities. If the outermode is the same
5426 as the origmode, we can omit both the extension and the subreg.
5427 If the outermode is not larger than the origmode, we can apply
5428 the truncation without the extension. Finally, if the outermode
5429 is larger than the origmode, but both are integer modes, we
5430 can just extend to the appropriate mode. */
5431 if (bitpos == 0)
5433 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5434 if (outermode == origmode)
5435 return XEXP (op, 0);
5436 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5437 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5438 subreg_lowpart_offset (outermode,
5439 origmode));
5440 if (SCALAR_INT_MODE_P (outermode))
5441 return simplify_gen_unary (GET_CODE (op), outermode,
5442 XEXP (op, 0), origmode);
5445 /* A SUBREG resulting from a zero extension may fold to zero if
5446 it extracts higher bits than the ZERO_EXTEND's source bits. */
5447 if (GET_CODE (op) == ZERO_EXTEND
5448 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5449 return CONST0_RTX (outermode);
5452 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5453 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5454 the outer subreg is effectively a truncation to the original mode. */
5455 if ((GET_CODE (op) == LSHIFTRT
5456 || GET_CODE (op) == ASHIFTRT)
5457 && SCALAR_INT_MODE_P (outermode)
5458 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5459 to avoid the possibility that an outer LSHIFTRT shifts by more
5460 than the sign extension's sign_bit_copies and introduces zeros
5461 into the high bits of the result. */
5462 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5463 && CONST_INT_P (XEXP (op, 1))
5464 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5465 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5466 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5467 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5468 return simplify_gen_binary (ASHIFTRT, outermode,
5469 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5471 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5472 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5473 the outer subreg is effectively a truncation to the original mode. */
5474 if ((GET_CODE (op) == LSHIFTRT
5475 || GET_CODE (op) == ASHIFTRT)
5476 && SCALAR_INT_MODE_P (outermode)
5477 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5478 && CONST_INT_P (XEXP (op, 1))
5479 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5480 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5481 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5482 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5483 return simplify_gen_binary (LSHIFTRT, outermode,
5484 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5486 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5487 (ashift:QI (x:QI) C), where C is a suitable small constant and
5488 the outer subreg is effectively a truncation to the original mode. */
5489 if (GET_CODE (op) == ASHIFT
5490 && SCALAR_INT_MODE_P (outermode)
5491 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5492 && CONST_INT_P (XEXP (op, 1))
5493 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5494 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5495 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5496 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5497 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5498 return simplify_gen_binary (ASHIFT, outermode,
5499 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5501 /* Recognize a word extraction from a multi-word subreg. */
5502 if ((GET_CODE (op) == LSHIFTRT
5503 || GET_CODE (op) == ASHIFTRT)
5504 && SCALAR_INT_MODE_P (outermode)
5505 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5506 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5507 && CONST_INT_P (XEXP (op, 1))
5508 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5509 && INTVAL (XEXP (op, 1)) >= 0
5510 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5511 && byte == subreg_lowpart_offset (outermode, innermode))
5513 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5514 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5515 (WORDS_BIG_ENDIAN
5516 ? byte - shifted_bytes
5517 : byte + shifted_bytes));
5520 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5521 and try replacing the SUBREG and shift with it. Don't do this if
5522 the MEM has a mode-dependent address or if we would be widening it. */
5524 if ((GET_CODE (op) == LSHIFTRT
5525 || GET_CODE (op) == ASHIFTRT)
5526 && MEM_P (XEXP (op, 0))
5527 && CONST_INT_P (XEXP (op, 1))
5528 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5529 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5530 && INTVAL (XEXP (op, 1)) > 0
5531 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5532 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5533 && ! MEM_VOLATILE_P (XEXP (op, 0))
5534 && byte == subreg_lowpart_offset (outermode, innermode)
5535 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5536 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5538 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5539 return adjust_address_nv (XEXP (op, 0), outermode,
5540 (WORDS_BIG_ENDIAN
5541 ? byte - shifted_bytes
5542 : byte + shifted_bytes));
5545 return NULL_RTX;
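/* Worked examples (added for exposition; the lowpart byte offset of 0
   assumes a little-endian layout, "r" is a hypothetical register):

     (subreg:QI (zero_extend:SI (reg:QI r)) 0)  -> (reg:QI r)
     (subreg:HI (zero_extend:SI (reg:QI r)) 0)  -> (zero_extend:HI (reg:QI r))
     (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI r)) (const_int 2)) 0)
       -> (lshiftrt:QI (reg:QI r) (const_int 2))  */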
5548 /* Make a SUBREG operation or equivalent if it folds. */
5551 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5552 enum machine_mode innermode, unsigned int byte)
5554 rtx newx;
5556 newx = simplify_subreg (outermode, op, innermode, byte);
5557 if (newx)
5558 return newx;
5560 if (GET_CODE (op) == SUBREG
5561 || GET_CODE (op) == CONCAT
5562 || GET_MODE (op) == VOIDmode)
5563 return NULL_RTX;
5565 if (validate_subreg (outermode, innermode, op, byte))
5566 return gen_rtx_SUBREG (outermode, op, byte);
5568 return NULL_RTX;
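/* Minimal usage sketch (added for exposition; not compiled): unlike
   simplify_subreg, simplify_gen_subreg falls back to generating a raw
   SUBREG when nothing folds but the subreg is valid.  The register
   number below is an arbitrary, hypothetical pseudo.  */
#if 0
  rtx reg = gen_rtx_REG (SImode, 200);
  rtx lo = simplify_gen_subreg (QImode, reg, SImode,
                                subreg_lowpart_offset (QImode, SImode));
  /* lo is (subreg:QI (reg:SI 200) N), N being the lowpart offset, or
     NULL_RTX if such a subreg would not be valid on the target.  */
#endif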
5571 /* Simplify X, an rtx expression.
5573 Return the simplified expression or NULL if no simplifications
5574 were possible.
5576 This is the preferred entry point into the simplification routines;
5577 however, we still allow passes to call the more specific routines.
5579 Right now GCC has three (yes, three) major bodies of RTL simplification
5580 code that need to be unified.
5582 1. fold_rtx in cse.c. This code uses various CSE specific
5583 information to aid in RTL simplification.
5585 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5586 it uses combine specific information to aid in RTL
5587 simplification.
5589 3. The routines in this file.
5592 Long term we want to only have one body of simplification code; to
5593 get to that state I recommend the following steps:
5595 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5596 which do not rely on pass-dependent state into these routines.
5598 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5599 use this routine whenever possible.
5601 3. Allow for pass dependent state to be provided to these
5602 routines and add simplifications based on the pass dependent
5603 state. Remove code from cse.c & combine.c that becomes
5604 redundant/dead.
5606 It will take time, but ultimately the compiler will be easier to
5607 maintain and improve.  It's totally silly that when we add a
5608 simplification it needs to be added to 4 places (3 for RTL
5609 simplification and 1 for tree simplification). */
5612 simplify_rtx (const_rtx x)
5614 const enum rtx_code code = GET_CODE (x);
5615 const enum machine_mode mode = GET_MODE (x);
5617 switch (GET_RTX_CLASS (code))
5619 case RTX_UNARY:
5620 return simplify_unary_operation (code, mode,
5621 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5622 case RTX_COMM_ARITH:
5623 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5624 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5626 /* Fall through.... */
5628 case RTX_BIN_ARITH:
5629 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5631 case RTX_TERNARY:
5632 case RTX_BITFIELD_OPS:
5633 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5634 XEXP (x, 0), XEXP (x, 1),
5635 XEXP (x, 2));
5637 case RTX_COMPARE:
5638 case RTX_COMM_COMPARE:
5639 return simplify_relational_operation (code, mode,
5640 ((GET_MODE (XEXP (x, 0))
5641 != VOIDmode)
5642 ? GET_MODE (XEXP (x, 0))
5643 : GET_MODE (XEXP (x, 1))),
5644 XEXP (x, 0),
5645 XEXP (x, 1));
5647 case RTX_EXTRA:
5648 if (code == SUBREG)
5649 return simplify_subreg (mode, SUBREG_REG (x),
5650 GET_MODE (SUBREG_REG (x)),
5651 SUBREG_BYTE (x));
5652 break;
5654 case RTX_OBJ:
5655 if (code == LO_SUM)
5657 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5658 if (GET_CODE (XEXP (x, 0)) == HIGH
5659 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5660 return XEXP (x, 1);
5662 break;
5664 default:
5665 break;
5667 return NULL;
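/* Minimal usage sketch (added for exposition; not compiled): the generic
   entry point dispatches on the rtx class of X and returns NULL when
   nothing simplifies.  The register number below is an arbitrary,
   hypothetical pseudo.  */
#if 0
  rtx x = gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 300), const0_rtx);
  rtx s = simplify_rtx (x);
  /* s is (reg:SI 300): the RTX_COMM_ARITH case falls through to
     simplify_binary_operation, which folds the addition of zero.  */
#endif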