gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
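/* Editorial note: an illustrative sketch, not part of the original file.
   It shows the (low, high) pair convention described above: the high word
   of a sign-extended value is just HWI_SIGN_EXTEND of the low word, so -5
   is carried as (low = -5, high = -1) and 5 as (low = 5, high = 0).  The
   function name is hypothetical.  */
static HOST_WIDE_INT
example_high_word (HOST_WIDE_INT low)
{
  /* Every bit of the high word copies the sign bit of LOW.  */
  return HWI_SIGN_EXTEND (low);
}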
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
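/* Editorial note: an illustrative sketch, not part of the original file.
   The cast to unsigned HOST_WIDE_INT above avoids signed overflow when
   negating the most negative representable value.  The function name is
   hypothetical.  */
static rtx
example_negate_min_si (void)
{
  /* gen_int_mode canonicalizes this to the SImode constant -2147483648;
     negating it would be expected to wrap back to the same CONST_INT
     rather than overflow.  */
  rtx min_si = gen_int_mode (-2147483647 - 1, SImode);
  return neg_const_int (SImode, min_si);
}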
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
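/* Editorial note: an illustrative sketch, not part of the original file.
   In QImode the sign bit is 0x80, so the check below would be expected to
   return true, whereas 0x40 would not satisfy it.  The function name is
   hypothetical.  */
static bool
example_qimode_signbit (void)
{
  /* gen_int_mode canonicalizes 0x80 to (const_int -128) for QImode.  */
  return mode_signbit_p (QImode, gen_int_mode (0x80, QImode));
}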
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 unsigned int width;
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 unsigned int width;
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
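/* Editorial note: an illustrative sketch, not part of the original file.
   The val_signbit_* helpers take a raw HOST_WIDE_INT rather than an rtx.
   For HImode the sign bit is 0x8000, so the three checks below would be
   expected to evaluate to true, true and false respectively.  The function
   name is hypothetical.  */
static void
example_val_signbit (void)
{
  bool a = val_signbit_p (HImode, 0x8000);
  bool b = val_signbit_known_set_p (HImode, 0xffff);
  bool c = val_signbit_known_clear_p (HImode, 0xffff);
  (void) a; (void) b; (void) c;
}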
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
170 rtx tem;
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
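/* Editorial note: an illustrative usage sketch, not part of the original
   file.  When both operands are constants the fold succeeds; otherwise the
   operands come back as a canonically ordered binary rtx.  The function
   name is hypothetical.  */
static rtx
example_fold_plus (void)
{
  /* Expected to yield (const_int 5) rather than a PLUS rtx.  */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
}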
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
188 avoid_constant_pool_reference (rtx x)
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
194 switch (GET_CODE (x))
196 case MEM:
197 break;
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
205 REAL_VALUE_TYPE d;
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 return x;
212 default:
213 return x;
216 if (GET_MODE (x) == BLKmode)
217 return x;
219 addr = XEXP (x, 0);
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
510 break;
512 default:
513 break;
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
530 if (newvec == vec)
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
537 RTVEC_ELT (newvec, j) = op;
540 break;
542 case 'e':
543 if (XEXP (x, i))
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
553 break;
555 return newx;
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
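/* Editorial note: an illustrative usage sketch, not part of the original
   file.  It substitutes a constant for a register and lets the result
   fold.  The register number and function name are hypothetical.  */
static rtx
example_replace_and_fold (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (1));
  /* Expected to yield (const_int 8).  */
  return simplify_replace_rtx (x, reg, GEN_INT (7));
}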
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
574 rtx trueop, tem;
576 trueop = avoid_constant_pool_reference (op);
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
582 return simplify_unary_operation_1 (code, mode, op);
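/* Editorial note: an illustrative usage sketch, not part of the original
   file.  A constant operand is folded immediately; a non-constant operand
   falls through to the pattern-based rules in simplify_unary_operation_1.
   The function name is hypothetical.  */
static rtx
example_fold_neg (void)
{
  /* Expected to yield (const_int -5).  */
  return simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);
}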
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
590 enum rtx_code reversed;
591 rtx temp;
593 switch (code)
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
701 break;
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
804 else if (STORE_FLAG_VALUE == -1)
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
815 break;
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
873 /* A truncate of a memory is just loading the low part of the memory
874 if we are not changing the meaning of the address. */
875 if (GET_CODE (op) == MEM
876 && !MEM_VOLATILE_P (op)
877 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
878 return rtl_hooks.gen_lowpart_no_emit (mode, op);
880 break;
882 case FLOAT_TRUNCATE:
883 if (DECIMAL_FLOAT_MODE_P (mode))
884 break;
886 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
887 if (GET_CODE (op) == FLOAT_EXTEND
888 && GET_MODE (XEXP (op, 0)) == mode)
889 return XEXP (op, 0);
891 /* (float_truncate:SF (float_truncate:DF foo:XF))
892 = (float_truncate:SF foo:XF).
893 This may eliminate double rounding, so it is unsafe.
895 (float_truncate:SF (float_extend:XF foo:DF))
896 = (float_truncate:SF foo:DF).
898 (float_truncate:DF (float_extend:XF foo:SF))
899 = (float_extend:DF foo:SF). */
900 if ((GET_CODE (op) == FLOAT_TRUNCATE
901 && flag_unsafe_math_optimizations)
902 || GET_CODE (op) == FLOAT_EXTEND)
903 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
904 0)))
905 > GET_MODE_SIZE (mode)
906 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
907 mode,
908 XEXP (op, 0), mode);
910 /* (float_truncate (float x)) is (float x) */
911 if (GET_CODE (op) == FLOAT
912 && (flag_unsafe_math_optimizations
913 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
914 && ((unsigned)significand_size (GET_MODE (op))
915 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
916 - num_sign_bit_copies (XEXP (op, 0),
917 GET_MODE (XEXP (op, 0))))))))
918 return simplify_gen_unary (FLOAT, mode,
919 XEXP (op, 0),
920 GET_MODE (XEXP (op, 0)));
922 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
923 (OP:SF foo:SF) if OP is NEG or ABS. */
924 if ((GET_CODE (op) == ABS
925 || GET_CODE (op) == NEG)
926 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
927 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
928 return simplify_gen_unary (GET_CODE (op), mode,
929 XEXP (XEXP (op, 0), 0), mode);
931 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
932 is (float_truncate:SF x). */
933 if (GET_CODE (op) == SUBREG
934 && subreg_lowpart_p (op)
935 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
936 return SUBREG_REG (op);
937 break;
939 case FLOAT_EXTEND:
940 if (DECIMAL_FLOAT_MODE_P (mode))
941 break;
943 /* (float_extend (float_extend x)) is (float_extend x)
945 (float_extend (float x)) is (float x) assuming that double
946 rounding can't happen. */
948 if (GET_CODE (op) == FLOAT_EXTEND
949 || (GET_CODE (op) == FLOAT
950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
951 && ((unsigned)significand_size (GET_MODE (op))
952 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
953 - num_sign_bit_copies (XEXP (op, 0),
954 GET_MODE (XEXP (op, 0)))))))
955 return simplify_gen_unary (GET_CODE (op), mode,
956 XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
959 break;
961 case ABS:
962 /* (abs (neg <foo>)) -> (abs <foo>) */
963 if (GET_CODE (op) == NEG)
964 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
965 GET_MODE (XEXP (op, 0)));
967 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
968 do nothing. */
969 if (GET_MODE (op) == VOIDmode)
970 break;
972 /* If operand is something known to be positive, ignore the ABS. */
973 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
974 || val_signbit_known_clear_p (GET_MODE (op),
975 nonzero_bits (op, GET_MODE (op))))
976 return op;
978 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
979 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
980 return gen_rtx_NEG (mode, op);
982 break;
984 case FFS:
985 /* (ffs (*_extend <X>)) = (ffs <X>) */
986 if (GET_CODE (op) == SIGN_EXTEND
987 || GET_CODE (op) == ZERO_EXTEND)
988 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
989 GET_MODE (XEXP (op, 0)));
990 break;
992 case POPCOUNT:
993 switch (GET_CODE (op))
995 case BSWAP:
996 case ZERO_EXTEND:
997 /* (popcount (zero_extend <X>)) = (popcount <X>) */
998 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 GET_MODE (XEXP (op, 0)));
1001 case ROTATE:
1002 case ROTATERT:
1003 /* Rotations don't affect popcount. */
1004 if (!side_effects_p (XEXP (op, 1)))
1005 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1006 GET_MODE (XEXP (op, 0)));
1007 break;
1009 default:
1010 break;
1012 break;
1014 case PARITY:
1015 switch (GET_CODE (op))
1017 case NOT:
1018 case BSWAP:
1019 case ZERO_EXTEND:
1020 case SIGN_EXTEND:
1021 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 GET_MODE (XEXP (op, 0)));
1024 case ROTATE:
1025 case ROTATERT:
1026 /* Rotations don't affect parity. */
1027 if (!side_effects_p (XEXP (op, 1)))
1028 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1029 GET_MODE (XEXP (op, 0)));
1030 break;
1032 default:
1033 break;
1035 break;
1037 case BSWAP:
1038 /* (bswap (bswap x)) -> x. */
1039 if (GET_CODE (op) == BSWAP)
1040 return XEXP (op, 0);
1041 break;
1043 case FLOAT:
1044 /* (float (sign_extend <X>)) = (float <X>). */
1045 if (GET_CODE (op) == SIGN_EXTEND)
1046 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1047 GET_MODE (XEXP (op, 0)));
1048 break;
1050 case SIGN_EXTEND:
1051 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1052 becomes just the MINUS if its mode is MODE. This allows
1053 folding switch statements on machines using casesi (such as
1054 the VAX). */
1055 if (GET_CODE (op) == TRUNCATE
1056 && GET_MODE (XEXP (op, 0)) == mode
1057 && GET_CODE (XEXP (op, 0)) == MINUS
1058 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1059 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1060 return XEXP (op, 0);
1062 /* Extending a widening multiplication should be canonicalized to
1063 a wider widening multiplication. */
1064 if (GET_CODE (op) == MULT)
1066 rtx lhs = XEXP (op, 0);
1067 rtx rhs = XEXP (op, 1);
1068 enum rtx_code lcode = GET_CODE (lhs);
1069 enum rtx_code rcode = GET_CODE (rhs);
1071 /* Widening multiplies usually extend both operands, but sometimes
1072 they use a shift to extract a portion of a register. */
1073 if ((lcode == SIGN_EXTEND
1074 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1075 && (rcode == SIGN_EXTEND
1076 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1078 enum machine_mode lmode = GET_MODE (lhs);
1079 enum machine_mode rmode = GET_MODE (rhs);
1080 int bits;
1082 if (lcode == ASHIFTRT)
1083 /* Number of bits not shifted off the end. */
1084 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1085 else /* lcode == SIGN_EXTEND */
1086 /* Size of inner mode. */
1087 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1089 if (rcode == ASHIFTRT)
1090 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1091 else /* rcode == SIGN_EXTEND */
1092 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1094 /* We can only widen multiplies if the result is mathematically
1095 equivalent. I.e. if overflow was impossible. */
1096 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1097 return simplify_gen_binary
1098 (MULT, mode,
1099 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1100 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1104 /* Check for a sign extension of a subreg of a promoted
1105 variable, where the promotion is sign-extended, and the
1106 target mode is the same as the variable's promotion. */
1107 if (GET_CODE (op) == SUBREG
1108 && SUBREG_PROMOTED_VAR_P (op)
1109 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1110 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1111 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1113 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1114 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1115 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1117 gcc_assert (GET_MODE_BITSIZE (mode)
1118 > GET_MODE_BITSIZE (GET_MODE (op)));
1119 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1120 GET_MODE (XEXP (op, 0)));
1123 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1124 is (sign_extend:M (subreg:O <X>)) if there is mode with
1125 GET_MODE_BITSIZE (N) - I bits.
1126 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1127 is similarly (zero_extend:M (subreg:O <X>)). */
1128 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1129 && GET_CODE (XEXP (op, 0)) == ASHIFT
1130 && CONST_INT_P (XEXP (op, 1))
1131 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1132 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1134 enum machine_mode tmode
1135 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1136 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1137 gcc_assert (GET_MODE_BITSIZE (mode)
1138 > GET_MODE_BITSIZE (GET_MODE (op)));
1139 if (tmode != BLKmode)
1141 rtx inner =
1142 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1143 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1144 ? SIGN_EXTEND : ZERO_EXTEND,
1145 mode, inner, tmode);
1149 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1150 /* As we do not know which address space the pointer is referring to,
1151 we can do this only if the target does not support different pointer
1152 or address modes depending on the address space. */
1153 if (target_default_pointer_address_modes_p ()
1154 && ! POINTERS_EXTEND_UNSIGNED
1155 && mode == Pmode && GET_MODE (op) == ptr_mode
1156 && (CONSTANT_P (op)
1157 || (GET_CODE (op) == SUBREG
1158 && REG_P (SUBREG_REG (op))
1159 && REG_POINTER (SUBREG_REG (op))
1160 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1161 return convert_memory_address (Pmode, op);
1162 #endif
1163 break;
1165 case ZERO_EXTEND:
1166 /* Check for a zero extension of a subreg of a promoted
1167 variable, where the promotion is zero-extended, and the
1168 target mode is the same as the variable's promotion. */
1169 if (GET_CODE (op) == SUBREG
1170 && SUBREG_PROMOTED_VAR_P (op)
1171 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1172 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1173 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1175 /* Extending a widening multiplication should be canonicalized to
1176 a wider widening multiplication. */
1177 if (GET_CODE (op) == MULT)
1179 rtx lhs = XEXP (op, 0);
1180 rtx rhs = XEXP (op, 1);
1181 enum rtx_code lcode = GET_CODE (lhs);
1182 enum rtx_code rcode = GET_CODE (rhs);
1184 /* Widening multiplies usually extend both operands, but sometimes
1185 they use a shift to extract a portion of a register. */
1186 if ((lcode == ZERO_EXTEND
1187 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1188 && (rcode == ZERO_EXTEND
1189 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1191 enum machine_mode lmode = GET_MODE (lhs);
1192 enum machine_mode rmode = GET_MODE (rhs);
1193 int bits;
1195 if (lcode == LSHIFTRT)
1196 /* Number of bits not shifted off the end. */
1197 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1198 else /* lcode == ZERO_EXTEND */
1199 /* Size of inner mode. */
1200 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1202 if (rcode == LSHIFTRT)
1203 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1204 else /* rcode == ZERO_EXTEND */
1205 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1207 /* We can only widen multiplies if the result is mathematically
1208 equivalent. I.e. if overflow was impossible. */
1209 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1210 return simplify_gen_binary
1211 (MULT, mode,
1212 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1213 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1217 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1218 if (GET_CODE (op) == ZERO_EXTEND)
1219 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1222 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1223 is (zero_extend:M (subreg:O <X>)) if there is mode with
1224 GET_MODE_BITSIZE (N) - I bits. */
1225 if (GET_CODE (op) == LSHIFTRT
1226 && GET_CODE (XEXP (op, 0)) == ASHIFT
1227 && CONST_INT_P (XEXP (op, 1))
1228 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1229 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1231 enum machine_mode tmode
1232 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1233 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1234 if (tmode != BLKmode)
1236 rtx inner =
1237 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1238 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1242 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1243 /* As we do not know which address space the pointer is referring to,
1244 we can do this only if the target does not support different pointer
1245 or address modes depending on the address space. */
1246 if (target_default_pointer_address_modes_p ()
1247 && POINTERS_EXTEND_UNSIGNED > 0
1248 && mode == Pmode && GET_MODE (op) == ptr_mode
1249 && (CONSTANT_P (op)
1250 || (GET_CODE (op) == SUBREG
1251 && REG_P (SUBREG_REG (op))
1252 && REG_POINTER (SUBREG_REG (op))
1253 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1254 return convert_memory_address (Pmode, op);
1255 #endif
1256 break;
1258 default:
1259 break;
1262 return 0;
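/* Editorial note: an illustrative sketch, not part of the original file.
   It exercises one of the non-constant rules above: (not (not X))
   collapses back to X.  The function name is hypothetical.  */
static rtx
example_double_not (rtx reg)
{
  rtx notted = gen_rtx_NOT (SImode, reg);
  /* Expected to return REG itself.  */
  return simplify_unary_operation (NOT, SImode, notted, SImode);
}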
1265 /* Try to compute the value of a unary operation CODE whose output mode is to
1266 be MODE with input operand OP whose mode was originally OP_MODE.
1267 Return zero if the value cannot be computed. */
1269 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1270 rtx op, enum machine_mode op_mode)
1272 unsigned int width = GET_MODE_PRECISION (mode);
1273 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1275 if (code == VEC_DUPLICATE)
1277 gcc_assert (VECTOR_MODE_P (mode));
1278 if (GET_MODE (op) != VOIDmode)
1280 if (!VECTOR_MODE_P (GET_MODE (op)))
1281 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1282 else
1283 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1284 (GET_MODE (op)));
1286 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1287 || GET_CODE (op) == CONST_VECTOR)
1289 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1290 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1291 rtvec v = rtvec_alloc (n_elts);
1292 unsigned int i;
1294 if (GET_CODE (op) != CONST_VECTOR)
1295 for (i = 0; i < n_elts; i++)
1296 RTVEC_ELT (v, i) = op;
1297 else
1299 enum machine_mode inmode = GET_MODE (op);
1300 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1301 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1303 gcc_assert (in_n_elts < n_elts);
1304 gcc_assert ((n_elts % in_n_elts) == 0);
1305 for (i = 0; i < n_elts; i++)
1306 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1308 return gen_rtx_CONST_VECTOR (mode, v);
1312 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1314 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1315 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1316 enum machine_mode opmode = GET_MODE (op);
1317 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1318 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1319 rtvec v = rtvec_alloc (n_elts);
1320 unsigned int i;
1322 gcc_assert (op_n_elts == n_elts);
1323 for (i = 0; i < n_elts; i++)
1325 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1326 CONST_VECTOR_ELT (op, i),
1327 GET_MODE_INNER (opmode));
1328 if (!x)
1329 return 0;
1330 RTVEC_ELT (v, i) = x;
1332 return gen_rtx_CONST_VECTOR (mode, v);
1335 /* The order of these tests is critical so that, for example, we don't
1336 check the wrong mode (input vs. output) for a conversion operation,
1337 such as FIX. At some point, this should be simplified. */
1339 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1341 HOST_WIDE_INT hv, lv;
1342 REAL_VALUE_TYPE d;
1344 if (CONST_INT_P (op))
1345 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1346 else
1347 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1349 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1350 d = real_value_truncate (mode, d);
1351 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1353 else if (code == UNSIGNED_FLOAT
1354 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1356 HOST_WIDE_INT hv, lv;
1357 REAL_VALUE_TYPE d;
1359 if (CONST_INT_P (op))
1360 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1361 else
1362 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1364 if (op_mode == VOIDmode
1365 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1366 /* We should never get a negative number. */
1367 gcc_assert (hv >= 0);
1368 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1369 hv = 0, lv &= GET_MODE_MASK (op_mode);
1371 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1372 d = real_value_truncate (mode, d);
1373 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1376 if (CONST_INT_P (op)
1377 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1379 HOST_WIDE_INT arg0 = INTVAL (op);
1380 HOST_WIDE_INT val;
1382 switch (code)
1384 case NOT:
1385 val = ~ arg0;
1386 break;
1388 case NEG:
1389 val = - arg0;
1390 break;
1392 case ABS:
1393 val = (arg0 >= 0 ? arg0 : - arg0);
1394 break;
1396 case FFS:
1397 arg0 &= GET_MODE_MASK (mode);
1398 val = ffs_hwi (arg0);
1399 break;
1401 case CLZ:
1402 arg0 &= GET_MODE_MASK (mode);
1403 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1405 else
1406 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1407 break;
1409 case CLRSB:
1410 arg0 &= GET_MODE_MASK (mode);
1411 if (arg0 == 0)
1412 val = GET_MODE_PRECISION (mode) - 1;
1413 else if (arg0 >= 0)
1414 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1415 else if (arg0 < 0)
1416 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1417 break;
1419 case CTZ:
1420 arg0 &= GET_MODE_MASK (mode);
1421 if (arg0 == 0)
1423 /* Even if the value at zero is undefined, we have to come
1424 up with some replacement. Seems good enough. */
1425 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1426 val = GET_MODE_PRECISION (mode);
1428 else
1429 val = ctz_hwi (arg0);
1430 break;
1432 case POPCOUNT:
1433 arg0 &= GET_MODE_MASK (mode);
1434 val = 0;
1435 while (arg0)
1436 val++, arg0 &= arg0 - 1;
1437 break;
1439 case PARITY:
1440 arg0 &= GET_MODE_MASK (mode);
1441 val = 0;
1442 while (arg0)
1443 val++, arg0 &= arg0 - 1;
1444 val &= 1;
1445 break;
1447 case BSWAP:
1449 unsigned int s;
1451 val = 0;
1452 for (s = 0; s < width; s += 8)
1454 unsigned int d = width - s - 8;
1455 unsigned HOST_WIDE_INT byte;
1456 byte = (arg0 >> s) & 0xff;
1457 val |= byte << d;
1460 break;
1462 case TRUNCATE:
1463 val = arg0;
1464 break;
1466 case ZERO_EXTEND:
1467 /* When zero-extending a CONST_INT, we need to know its
1468 original mode. */
1469 gcc_assert (op_mode != VOIDmode);
1470 if (op_width == HOST_BITS_PER_WIDE_INT)
1472 /* If we were really extending the mode,
1473 we would have to distinguish between zero-extension
1474 and sign-extension. */
1475 gcc_assert (width == op_width);
1476 val = arg0;
1478 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 val = arg0 & GET_MODE_MASK (op_mode);
1480 else
1481 return 0;
1482 break;
1484 case SIGN_EXTEND:
1485 if (op_mode == VOIDmode)
1486 op_mode = mode;
1487 op_width = GET_MODE_PRECISION (op_mode);
1488 if (op_width == HOST_BITS_PER_WIDE_INT)
1490 /* If we were really extending the mode,
1491 we would have to distinguish between zero-extension
1492 and sign-extension. */
1493 gcc_assert (width == op_width);
1494 val = arg0;
1496 else if (op_width < HOST_BITS_PER_WIDE_INT)
1498 val = arg0 & GET_MODE_MASK (op_mode);
1499 if (val_signbit_known_set_p (op_mode, val))
1500 val |= ~GET_MODE_MASK (op_mode);
1502 else
1503 return 0;
1504 break;
1506 case SQRT:
1507 case FLOAT_EXTEND:
1508 case FLOAT_TRUNCATE:
1509 case SS_TRUNCATE:
1510 case US_TRUNCATE:
1511 case SS_NEG:
1512 case US_NEG:
1513 case SS_ABS:
1514 return 0;
1516 default:
1517 gcc_unreachable ();
1520 return gen_int_mode (val, mode);
1523 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1524 for a DImode operation on a CONST_INT. */
1525 else if (width <= HOST_BITS_PER_DOUBLE_INT
1526 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1528 double_int first, value;
1530 if (CONST_DOUBLE_AS_INT_P (op))
1531 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1532 CONST_DOUBLE_LOW (op));
1533 else
1534 first = double_int::from_shwi (INTVAL (op));
1536 switch (code)
1538 case NOT:
1539 value = ~first;
1540 break;
1542 case NEG:
1543 value = -first;
1544 break;
1546 case ABS:
1547 if (first.is_negative ())
1548 value = -first;
1549 else
1550 value = first;
1551 break;
1553 case FFS:
1554 value.high = 0;
1555 if (first.low != 0)
1556 value.low = ffs_hwi (first.low);
1557 else if (first.high != 0)
1558 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1559 else
1560 value.low = 0;
1561 break;
1563 case CLZ:
1564 value.high = 0;
1565 if (first.high != 0)
1566 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1567 - HOST_BITS_PER_WIDE_INT;
1568 else if (first.low != 0)
1569 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1570 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1571 value.low = GET_MODE_PRECISION (mode);
1572 break;
1574 case CTZ:
1575 value.high = 0;
1576 if (first.low != 0)
1577 value.low = ctz_hwi (first.low);
1578 else if (first.high != 0)
1579 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1580 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1581 value.low = GET_MODE_PRECISION (mode);
1582 break;
1584 case POPCOUNT:
1585 value = double_int_zero;
1586 while (first.low)
1588 value.low++;
1589 first.low &= first.low - 1;
1591 while (first.high)
1593 value.low++;
1594 first.high &= first.high - 1;
1596 break;
1598 case PARITY:
1599 value = double_int_zero;
1600 while (first.low)
1602 value.low++;
1603 first.low &= first.low - 1;
1605 while (first.high)
1607 value.low++;
1608 first.high &= first.high - 1;
1610 value.low &= 1;
1611 break;
1613 case BSWAP:
1615 unsigned int s;
1617 value = double_int_zero;
1618 for (s = 0; s < width; s += 8)
1620 unsigned int d = width - s - 8;
1621 unsigned HOST_WIDE_INT byte;
1623 if (s < HOST_BITS_PER_WIDE_INT)
1624 byte = (first.low >> s) & 0xff;
1625 else
1626 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1628 if (d < HOST_BITS_PER_WIDE_INT)
1629 value.low |= byte << d;
1630 else
1631 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1634 break;
1636 case TRUNCATE:
1637 /* This is just a change-of-mode, so do nothing. */
1638 value = first;
1639 break;
1641 case ZERO_EXTEND:
1642 gcc_assert (op_mode != VOIDmode);
1644 if (op_width > HOST_BITS_PER_WIDE_INT)
1645 return 0;
1647 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1648 break;
1650 case SIGN_EXTEND:
1651 if (op_mode == VOIDmode
1652 || op_width > HOST_BITS_PER_WIDE_INT)
1653 return 0;
1654 else
1656 value.low = first.low & GET_MODE_MASK (op_mode);
1657 if (val_signbit_known_set_p (op_mode, value.low))
1658 value.low |= ~GET_MODE_MASK (op_mode);
1660 value.high = HWI_SIGN_EXTEND (value.low);
1662 break;
1664 case SQRT:
1665 return 0;
1667 default:
1668 return 0;
1671 return immed_double_int_const (value, mode);
1674 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1675 && SCALAR_FLOAT_MODE_P (mode)
1676 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1678 REAL_VALUE_TYPE d, t;
1679 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1681 switch (code)
1683 case SQRT:
1684 if (HONOR_SNANS (mode) && real_isnan (&d))
1685 return 0;
1686 real_sqrt (&t, mode, &d);
1687 d = t;
1688 break;
1689 case ABS:
1690 d = real_value_abs (&d);
1691 break;
1692 case NEG:
1693 d = real_value_negate (&d);
1694 break;
1695 case FLOAT_TRUNCATE:
1696 d = real_value_truncate (mode, d);
1697 break;
1698 case FLOAT_EXTEND:
1699 /* All this does is change the mode, unless changing
1700 mode class. */
1701 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1702 real_convert (&d, mode, &d);
1703 break;
1704 case FIX:
1705 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1706 break;
1707 case NOT:
1709 long tmp[4];
1710 int i;
1712 real_to_target (tmp, &d, GET_MODE (op));
1713 for (i = 0; i < 4; i++)
1714 tmp[i] = ~tmp[i];
1715 real_from_target (&d, tmp, mode);
1716 break;
1718 default:
1719 gcc_unreachable ();
1721 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1724 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1725 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1726 && GET_MODE_CLASS (mode) == MODE_INT
1727 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1729 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1730 operators are intentionally left unspecified (to ease implementation
1731 by target backends), for consistency, this routine implements the
1732 same semantics for constant folding as used by the middle-end. */
1734 /* This was formerly used only for non-IEEE float.
1735 eggert@twinsun.com says it is safe for IEEE also. */
1736 HOST_WIDE_INT xh, xl, th, tl;
1737 REAL_VALUE_TYPE x, t;
1738 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1739 switch (code)
1741 case FIX:
1742 if (REAL_VALUE_ISNAN (x))
1743 return const0_rtx;
1745 /* Test against the signed upper bound. */
1746 if (width > HOST_BITS_PER_WIDE_INT)
1748 th = ((unsigned HOST_WIDE_INT) 1
1749 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1750 tl = -1;
1752 else
1754 th = 0;
1755 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1757 real_from_integer (&t, VOIDmode, tl, th, 0);
1758 if (REAL_VALUES_LESS (t, x))
1760 xh = th;
1761 xl = tl;
1762 break;
1765 /* Test against the signed lower bound. */
1766 if (width > HOST_BITS_PER_WIDE_INT)
1768 th = (unsigned HOST_WIDE_INT) (-1)
1769 << (width - HOST_BITS_PER_WIDE_INT - 1);
1770 tl = 0;
1772 else
1774 th = -1;
1775 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1777 real_from_integer (&t, VOIDmode, tl, th, 0);
1778 if (REAL_VALUES_LESS (x, t))
1780 xh = th;
1781 xl = tl;
1782 break;
1784 REAL_VALUE_TO_INT (&xl, &xh, x);
1785 break;
1787 case UNSIGNED_FIX:
1788 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1789 return const0_rtx;
1791 /* Test against the unsigned upper bound. */
1792 if (width == HOST_BITS_PER_DOUBLE_INT)
1794 th = -1;
1795 tl = -1;
1797 else if (width >= HOST_BITS_PER_WIDE_INT)
1799 th = ((unsigned HOST_WIDE_INT) 1
1800 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1801 tl = -1;
1803 else
1805 th = 0;
1806 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1808 real_from_integer (&t, VOIDmode, tl, th, 1);
1809 if (REAL_VALUES_LESS (t, x))
1811 xh = th;
1812 xl = tl;
1813 break;
1816 REAL_VALUE_TO_INT (&xl, &xh, x);
1817 break;
1819 default:
1820 gcc_unreachable ();
1822 return immed_double_const (xl, xh, mode);
1825 return NULL_RTX;
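/* Editorial note: an illustrative usage sketch, not part of the original
   file.  The constant folder handles the bit-counting codes directly, so
   the call below would be expected to yield (const_int 8).  The function
   name is hypothetical.  */
static rtx
example_fold_popcount (void)
{
  return simplify_const_unary_operation (POPCOUNT, SImode, GEN_INT (0xff),
					 SImode);
}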
1828 /* Subroutine of simplify_binary_operation to simplify a commutative,
1829 associative binary operation CODE with result mode MODE, operating
1830 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1831 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1832 canonicalization is possible. */
1834 static rtx
1835 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1836 rtx op0, rtx op1)
1838 rtx tem;
1840 /* Linearize the operator to the left. */
1841 if (GET_CODE (op1) == code)
1843 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1844 if (GET_CODE (op0) == code)
1846 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1847 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1850 /* "a op (b op c)" becomes "(b op c) op a". */
1851 if (! swap_commutative_operands_p (op1, op0))
1852 return simplify_gen_binary (code, mode, op1, op0);
1854 tem = op0;
1855 op0 = op1;
1856 op1 = tem;
1859 if (GET_CODE (op0) == code)
1861 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1862 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1864 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1865 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1868 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1869 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1870 if (tem != 0)
1871 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1873 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1874 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1875 if (tem != 0)
1876 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1879 return 0;
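/* Editorial note: an illustrative sketch, not part of the original file.
   The reassociation above lets nested operations with constant operands
   collapse, e.g. (and (and REG 0xff) 0x0f) would be expected to simplify
   to (and REG 0x0f).  The function name is hypothetical.  */
static rtx
example_reassociate_and (rtx reg)
{
  rtx inner = gen_rtx_AND (SImode, reg, GEN_INT (0xff));
  return simplify_gen_binary (AND, SImode, inner, GEN_INT (0x0f));
}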
1883 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1884 and OP1. Return 0 if no simplification is possible.
1886 Don't use this for relational operations such as EQ or LT.
1887 Use simplify_relational_operation instead. */
1889 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1890 rtx op0, rtx op1)
1892 rtx trueop0, trueop1;
1893 rtx tem;
1895 /* Relational operations don't work here. We must know the mode
1896 of the operands in order to do the comparison correctly.
1897 Assuming a full word can give incorrect results.
1898 Consider comparing 128 with -128 in QImode. */
1899 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1900 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1902 /* Make sure the constant is second. */
1903 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1904 && swap_commutative_operands_p (op0, op1))
1906 tem = op0, op0 = op1, op1 = tem;
1909 trueop0 = avoid_constant_pool_reference (op0);
1910 trueop1 = avoid_constant_pool_reference (op1);
1912 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1913 if (tem)
1914 return tem;
1915 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
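/* Editorial note: an illustrative usage sketch, not part of the original
   file.  Unlike simplify_gen_binary, this entry point returns 0 when
   nothing simplifies, so callers must be prepared to build the rtx
   themselves.  For CONST_INT operands 6 and 7 the call below would be
   expected to return (const_int 42); for two unrelated registers it
   returns 0.  The function name is hypothetical.  */
static rtx
example_try_fold_mult (rtx op0, rtx op1)
{
  return simplify_binary_operation (MULT, SImode, op0, op1);
}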
1918 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1919 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1920 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1921 actual constants. */
1923 static rtx
1924 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1925 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1927 rtx tem, reversed, opleft, opright;
1928 HOST_WIDE_INT val;
1929 unsigned int width = GET_MODE_PRECISION (mode);
1931 /* Even if we can't compute a constant result,
1932 there are some cases worth simplifying. */
1934 switch (code)
1936 case PLUS:
1937 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1938 when x is NaN, infinite, or finite and nonzero. They aren't
1939 when x is -0 and the rounding mode is not towards -infinity,
1940 since (-0) + 0 is then 0. */
1941 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1942 return op0;
1944 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1945 transformations are safe even for IEEE. */
1946 if (GET_CODE (op0) == NEG)
1947 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1948 else if (GET_CODE (op1) == NEG)
1949 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1951 /* (~a) + 1 -> -a */
1952 if (INTEGRAL_MODE_P (mode)
1953 && GET_CODE (op0) == NOT
1954 && trueop1 == const1_rtx)
1955 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1957 /* Handle both-operands-constant cases. We can only add
1958 CONST_INTs to constants since the sum of relocatable symbols
1959 can't be handled by most assemblers. Don't add CONST_INT
1960 to CONST_INT since overflow won't be computed properly if wider
1961 than HOST_BITS_PER_WIDE_INT. */
1963 if ((GET_CODE (op0) == CONST
1964 || GET_CODE (op0) == SYMBOL_REF
1965 || GET_CODE (op0) == LABEL_REF)
1966 && CONST_INT_P (op1))
1967 return plus_constant (mode, op0, INTVAL (op1));
1968 else if ((GET_CODE (op1) == CONST
1969 || GET_CODE (op1) == SYMBOL_REF
1970 || GET_CODE (op1) == LABEL_REF)
1971 && CONST_INT_P (op0))
1972 return plus_constant (mode, op1, INTVAL (op0));
1974 /* See if this is something like X * C - X or vice versa or
1975 if the multiplication is written as a shift. If so, we can
1976 distribute and make a new multiply, shift, or maybe just
1977 have X (if C is 2 in the example above). But don't make
1978 something more expensive than we had before. */
1980 if (SCALAR_INT_MODE_P (mode))
1982 double_int coeff0, coeff1;
1983 rtx lhs = op0, rhs = op1;
1985 coeff0 = double_int_one;
1986 coeff1 = double_int_one;
1988 if (GET_CODE (lhs) == NEG)
1990 coeff0 = double_int_minus_one;
1991 lhs = XEXP (lhs, 0);
1993 else if (GET_CODE (lhs) == MULT
1994 && CONST_INT_P (XEXP (lhs, 1)))
1996 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
1997 lhs = XEXP (lhs, 0);
1999 else if (GET_CODE (lhs) == ASHIFT
2000 && CONST_INT_P (XEXP (lhs, 1))
2001 && INTVAL (XEXP (lhs, 1)) >= 0
2002 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2004 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2005 lhs = XEXP (lhs, 0);
2008 if (GET_CODE (rhs) == NEG)
2010 coeff1 = double_int_minus_one;
2011 rhs = XEXP (rhs, 0);
2013 else if (GET_CODE (rhs) == MULT
2014 && CONST_INT_P (XEXP (rhs, 1)))
2016 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2017 rhs = XEXP (rhs, 0);
2019 else if (GET_CODE (rhs) == ASHIFT
2020 && CONST_INT_P (XEXP (rhs, 1))
2021 && INTVAL (XEXP (rhs, 1)) >= 0
2022 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2024 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2025 rhs = XEXP (rhs, 0);
2028 if (rtx_equal_p (lhs, rhs))
2030 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2031 rtx coeff;
2032 double_int val;
2033 bool speed = optimize_function_for_speed_p (cfun);
2035 val = coeff0 + coeff1;
2036 coeff = immed_double_int_const (val, mode);
2038 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2039 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2040 ? tem : 0;
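/* Worked example of the coefficient merge above (constants chosen for
   illustration only): for (plus (mult x 6) (ashift x 2)), coeff0 is 6
   and coeff1 is 1 << 2 == 4, both operands reduce to x, and the result
   is (mult x 10), kept only when set_src_cost says it is no more
   expensive than the original PLUS.  */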
2044 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2045 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2046 && GET_CODE (op0) == XOR
2047 && (CONST_INT_P (XEXP (op0, 1))
2048 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2049 && mode_signbit_p (mode, op1))
2050 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2051 simplify_gen_binary (XOR, mode, op1,
2052 XEXP (op0, 1)));
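/* Illustrative case (QImode chosen for concreteness): adding the sign
   bit 0x80 is the same as XORing it in, so (plus (xor x 0x0c) 0x80)
   becomes (xor x 0x8c).  */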
2054 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2055 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2056 && GET_CODE (op0) == MULT
2057 && GET_CODE (XEXP (op0, 0)) == NEG)
2059 rtx in1, in2;
2061 in1 = XEXP (XEXP (op0, 0), 0);
2062 in2 = XEXP (op0, 1);
2063 return simplify_gen_binary (MINUS, mode, op1,
2064 simplify_gen_binary (MULT, mode,
2065 in1, in2));
2068 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2069 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2070 is 1. */
2071 if (COMPARISON_P (op0)
2072 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2073 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2074 && (reversed = reversed_comparison (op0, mode)))
2075 return
2076 simplify_gen_unary (NEG, mode, reversed, mode);
2078 /* If one of the operands is a PLUS or a MINUS, see if we can
2079 simplify this by the associative law.
2080 Don't use the associative law for floating point.
2081 The inaccuracy makes it nonassociative,
2082 and subtle programs can break if operations are associated. */
2084 if (INTEGRAL_MODE_P (mode)
2085 && (plus_minus_operand_p (op0)
2086 || plus_minus_operand_p (op1))
2087 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2088 return tem;
2090 /* Reassociate floating point addition only when the user
2091 specifies associative math operations. */
2092 if (FLOAT_MODE_P (mode)
2093 && flag_associative_math)
2095 tem = simplify_associative_operation (code, mode, op0, op1);
2096 if (tem)
2097 return tem;
2099 break;
2101 case COMPARE:
2102 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2103 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2104 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2105 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2107 rtx xop00 = XEXP (op0, 0);
2108 rtx xop10 = XEXP (op1, 0);
2110 #ifdef HAVE_cc0
2111 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2112 #else
2113 if (REG_P (xop00) && REG_P (xop10)
2114 && GET_MODE (xop00) == GET_MODE (xop10)
2115 && REGNO (xop00) == REGNO (xop10)
2116 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2117 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2118 #endif
2119 return xop00;
2121 break;
2123 case MINUS:
2124 /* We can't assume x-x is 0 even with non-IEEE floating point,
2125 but since it is zero except in very strange circumstances, we
2126 will treat it as zero with -ffinite-math-only. */
2127 if (rtx_equal_p (trueop0, trueop1)
2128 && ! side_effects_p (op0)
2129 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2130 return CONST0_RTX (mode);
2132 /* Change subtraction from zero into negation. (0 - x) is the
2133 same as -x when x is NaN, infinite, or finite and nonzero.
2134 But if the mode has signed zeros, and does not round towards
2135 -infinity, then 0 - 0 is 0, not -0. */
2136 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2137 return simplify_gen_unary (NEG, mode, op1, mode);
2139 /* (-1 - a) is ~a. */
2140 if (trueop0 == constm1_rtx)
2141 return simplify_gen_unary (NOT, mode, op1, mode);
2143 /* Subtracting 0 has no effect unless the mode has signed zeros
2144 and supports rounding towards -infinity. In such a case,
2145 0 - 0 is -0. */
2146 if (!(HONOR_SIGNED_ZEROS (mode)
2147 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2148 && trueop1 == CONST0_RTX (mode))
2149 return op0;
2151 /* See if this is something like X * C - X or vice versa or
2152 if the multiplication is written as a shift. If so, we can
2153 distribute and make a new multiply, shift, or maybe just
2154 have X (if C is 2 in the example above). But don't make
2155 something more expensive than we had before. */
2157 if (SCALAR_INT_MODE_P (mode))
2159 double_int coeff0, negcoeff1;
2160 rtx lhs = op0, rhs = op1;
2162 coeff0 = double_int_one;
2163 negcoeff1 = double_int_minus_one;
2165 if (GET_CODE (lhs) == NEG)
2167 coeff0 = double_int_minus_one;
2168 lhs = XEXP (lhs, 0);
2170 else if (GET_CODE (lhs) == MULT
2171 && CONST_INT_P (XEXP (lhs, 1)))
2173 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2174 lhs = XEXP (lhs, 0);
2176 else if (GET_CODE (lhs) == ASHIFT
2177 && CONST_INT_P (XEXP (lhs, 1))
2178 && INTVAL (XEXP (lhs, 1)) >= 0
2179 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2181 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2182 lhs = XEXP (lhs, 0);
2185 if (GET_CODE (rhs) == NEG)
2187 negcoeff1 = double_int_one;
2188 rhs = XEXP (rhs, 0);
2190 else if (GET_CODE (rhs) == MULT
2191 && CONST_INT_P (XEXP (rhs, 1)))
2193 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2194 rhs = XEXP (rhs, 0);
2196 else if (GET_CODE (rhs) == ASHIFT
2197 && CONST_INT_P (XEXP (rhs, 1))
2198 && INTVAL (XEXP (rhs, 1)) >= 0
2199 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2201 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2202 negcoeff1 = -negcoeff1;
2203 rhs = XEXP (rhs, 0);
2206 if (rtx_equal_p (lhs, rhs))
2208 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2209 rtx coeff;
2210 double_int val;
2211 bool speed = optimize_function_for_speed_p (cfun);
2213 val = coeff0 + negcoeff1;
2214 coeff = immed_double_int_const (val, mode);
2216 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2217 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2218 ? tem : 0;
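/* Worked example of the MINUS form (constants chosen for illustration):
   (minus (mult x 8) (ashift x 1)) gives coeff0 == 8 and negcoeff1 == -2,
   so the result is (mult x 6), again subject to the set_src_cost
   check.  */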
2222 /* (a - (-b)) -> (a + b). True even for IEEE. */
2223 if (GET_CODE (op1) == NEG)
2224 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2226 /* (-x - c) may be simplified as (-c - x). */
2227 if (GET_CODE (op0) == NEG
2228 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2230 tem = simplify_unary_operation (NEG, mode, op1, mode);
2231 if (tem)
2232 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2235 /* Don't let a relocatable value get a negative coeff. */
2236 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2237 return simplify_gen_binary (PLUS, mode,
2238 op0,
2239 neg_const_int (mode, op1));
2241 /* (x - (x & y)) -> (x & ~y) */
2242 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2244 if (rtx_equal_p (op0, XEXP (op1, 0)))
2246 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2247 GET_MODE (XEXP (op1, 1)));
2248 return simplify_gen_binary (AND, mode, op0, tem);
2250 if (rtx_equal_p (op0, XEXP (op1, 1)))
2252 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2253 GET_MODE (XEXP (op1, 0)));
2254 return simplify_gen_binary (AND, mode, op0, tem);
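/* Numeric check of the identity above (values chosen for illustration):
   with x == 0b1100 and y == 0b1010, x & y == 0b1000, so x - (x & y)
   == 0b0100, which equals x & ~y == 0b1100 & 0b0101.  The subtraction
   never borrows because every bit of (x & y) is also set in x.  */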
2258 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2259 by reversing the comparison code if valid. */
2260 if (STORE_FLAG_VALUE == 1
2261 && trueop0 == const1_rtx
2262 && COMPARISON_P (op1)
2263 && (reversed = reversed_comparison (op1, mode)))
2264 return reversed;
2266 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2267 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2268 && GET_CODE (op1) == MULT
2269 && GET_CODE (XEXP (op1, 0)) == NEG)
2271 rtx in1, in2;
2273 in1 = XEXP (XEXP (op1, 0), 0);
2274 in2 = XEXP (op1, 1);
2275 return simplify_gen_binary (PLUS, mode,
2276 simplify_gen_binary (MULT, mode,
2277 in1, in2),
2278 op0);
2281 /* Canonicalize (minus (neg A) (mult B C)) to
2282 (minus (mult (neg B) C) A). */
2283 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2284 && GET_CODE (op1) == MULT
2285 && GET_CODE (op0) == NEG)
2287 rtx in1, in2;
2289 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2290 in2 = XEXP (op1, 1);
2291 return simplify_gen_binary (MINUS, mode,
2292 simplify_gen_binary (MULT, mode,
2293 in1, in2),
2294 XEXP (op0, 0));
2297 /* If one of the operands is a PLUS or a MINUS, see if we can
2298 simplify this by the associative law. This will, for example,
2299 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2300 Don't use the associative law for floating point.
2301 The inaccuracy makes it nonassociative,
2302 and subtle programs can break if operations are associated. */
2304 if (INTEGRAL_MODE_P (mode)
2305 && (plus_minus_operand_p (op0)
2306 || plus_minus_operand_p (op1))
2307 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2308 return tem;
2309 break;
2311 case MULT:
2312 if (trueop1 == constm1_rtx)
2313 return simplify_gen_unary (NEG, mode, op0, mode);
2315 if (GET_CODE (op0) == NEG)
2317 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2318 /* If op1 is a MULT as well and simplify_unary_operation
2319 just moved the NEG to the second operand, simplify_gen_binary
2320 below could, through simplify_associative_operation, move
2321 the NEG around again and recurse endlessly. */
2322 if (temp
2323 && GET_CODE (op1) == MULT
2324 && GET_CODE (temp) == MULT
2325 && XEXP (op1, 0) == XEXP (temp, 0)
2326 && GET_CODE (XEXP (temp, 1)) == NEG
2327 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2328 temp = NULL_RTX;
2329 if (temp)
2330 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2332 if (GET_CODE (op1) == NEG)
2334 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2335 /* If op0 is a MULT as well and simplify_unary_operation
2336 just moved the NEG to the second operand, simplify_gen_binary
2337 below could, through simplify_associative_operation, move
2338 the NEG around again and recurse endlessly. */
2339 if (temp
2340 && GET_CODE (op0) == MULT
2341 && GET_CODE (temp) == MULT
2342 && XEXP (op0, 0) == XEXP (temp, 0)
2343 && GET_CODE (XEXP (temp, 1)) == NEG
2344 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2345 temp = NULL_RTX;
2346 if (temp)
2347 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2350 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2351 x is NaN, since x * 0 is then also NaN. Nor is it valid
2352 when the mode has signed zeros, since multiplying a negative
2353 number by 0 will give -0, not 0. */
2354 if (!HONOR_NANS (mode)
2355 && !HONOR_SIGNED_ZEROS (mode)
2356 && trueop1 == CONST0_RTX (mode)
2357 && ! side_effects_p (op0))
2358 return op1;
2360 /* In IEEE floating point, x*1 is not equivalent to x for
2361 signalling NaNs. */
2362 if (!HONOR_SNANS (mode)
2363 && trueop1 == CONST1_RTX (mode))
2364 return op0;
2366 /* Convert multiply by constant power of two into shift unless
2367 we are still generating RTL. This test is a kludge. */
2368 if (CONST_INT_P (trueop1)
2369 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2370 /* If the mode is larger than the host word size, and the
2371 uppermost bit is set, then this isn't a power of two due
2372 to implicit sign extension. */
2373 && (width <= HOST_BITS_PER_WIDE_INT
2374 || val != HOST_BITS_PER_WIDE_INT - 1))
2375 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
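/* For instance (illustrative), multiplying by 8 gives exact_log2 == 3
   and becomes (ashift x 3).  The width test above rejects the case
   where the mode is wider than a host word and the constant's only set
   bit is the host word's sign bit, which is a sign-extended negative
   value rather than a power of two.  */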
2377 /* Likewise for multipliers wider than a word. */
2378 if (CONST_DOUBLE_AS_INT_P (trueop1)
2379 && GET_MODE (op0) == mode
2380 && CONST_DOUBLE_LOW (trueop1) == 0
2381 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2382 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2383 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2384 return simplify_gen_binary (ASHIFT, mode, op0,
2385 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2387 /* x*2 is x+x and x*(-1) is -x */
2388 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2389 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2390 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2391 && GET_MODE (op0) == mode)
2393 REAL_VALUE_TYPE d;
2394 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2396 if (REAL_VALUES_EQUAL (d, dconst2))
2397 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2399 if (!HONOR_SNANS (mode)
2400 && REAL_VALUES_EQUAL (d, dconstm1))
2401 return simplify_gen_unary (NEG, mode, op0, mode);
2404 /* Optimize -x * -x as x * x. */
2405 if (FLOAT_MODE_P (mode)
2406 && GET_CODE (op0) == NEG
2407 && GET_CODE (op1) == NEG
2408 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2409 && !side_effects_p (XEXP (op0, 0)))
2410 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2412 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2413 if (SCALAR_FLOAT_MODE_P (mode)
2414 && GET_CODE (op0) == ABS
2415 && GET_CODE (op1) == ABS
2416 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2417 && !side_effects_p (XEXP (op0, 0)))
2418 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2420 /* Reassociate multiplication, but for floating point MULTs
2421 only when the user specifies unsafe math optimizations. */
2422 if (! FLOAT_MODE_P (mode)
2423 || flag_unsafe_math_optimizations)
2425 tem = simplify_associative_operation (code, mode, op0, op1);
2426 if (tem)
2427 return tem;
2429 break;
2431 case IOR:
2432 if (trueop1 == CONST0_RTX (mode))
2433 return op0;
2434 if (INTEGRAL_MODE_P (mode)
2435 && trueop1 == CONSTM1_RTX (mode)
2436 && !side_effects_p (op0))
2437 return op1;
2438 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2439 return op0;
2440 /* A | (~A) -> -1 */
2441 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2442 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2443 && ! side_effects_p (op0)
2444 && SCALAR_INT_MODE_P (mode))
2445 return constm1_rtx;
2447 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2448 if (CONST_INT_P (op1)
2449 && HWI_COMPUTABLE_MODE_P (mode)
2450 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2451 && !side_effects_p (op0))
2452 return op1;
2454 /* Canonicalize (X & C1) | C2. */
2455 if (GET_CODE (op0) == AND
2456 && CONST_INT_P (trueop1)
2457 && CONST_INT_P (XEXP (op0, 1)))
2459 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2460 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2461 HOST_WIDE_INT c2 = INTVAL (trueop1);
2463 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2464 if ((c1 & c2) == c1
2465 && !side_effects_p (XEXP (op0, 0)))
2466 return trueop1;
2468 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2469 if (((c1|c2) & mask) == mask)
2470 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2472 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2473 if (((c1 & ~c2) & mask) != (c1 & mask))
2475 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2476 gen_int_mode (c1 & ~c2, mode));
2477 return simplify_gen_binary (IOR, mode, tem, op1);
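/* Example of the last rewrite (constants chosen for illustration): with
   C1 == 0x0f and C2 == 0x06, the bits already forced on by C2 are
   dropped from C1, giving (ior (and X 0x09) 0x06); the result is
   unchanged because the bits of 0x06 end up set either way.  */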
2481 /* Convert (A & B) | A to A. */
2482 if (GET_CODE (op0) == AND
2483 && (rtx_equal_p (XEXP (op0, 0), op1)
2484 || rtx_equal_p (XEXP (op0, 1), op1))
2485 && ! side_effects_p (XEXP (op0, 0))
2486 && ! side_effects_p (XEXP (op0, 1)))
2487 return op1;
2489 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2490 mode size to (rotate A CX). */
2492 if (GET_CODE (op1) == ASHIFT
2493 || GET_CODE (op1) == SUBREG)
2495 opleft = op1;
2496 opright = op0;
2498 else
2500 opright = op1;
2501 opleft = op0;
2504 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2505 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2506 && CONST_INT_P (XEXP (opleft, 1))
2507 && CONST_INT_P (XEXP (opright, 1))
2508 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2509 == GET_MODE_PRECISION (mode)))
2510 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
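/* Concrete instance (SImode assumed for illustration): 8 + 24 equals
   the 32-bit precision, so (ior (ashift x 8) (lshiftrt x 24)) is
   recognized as (rotate x 8).  */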
2512 /* Same, but for ashift that has been "simplified" to a wider mode
2513 by simplify_shift_const. */
2515 if (GET_CODE (opleft) == SUBREG
2516 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2517 && GET_CODE (opright) == LSHIFTRT
2518 && GET_CODE (XEXP (opright, 0)) == SUBREG
2519 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2520 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2521 && (GET_MODE_SIZE (GET_MODE (opleft))
2522 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2523 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2524 SUBREG_REG (XEXP (opright, 0)))
2525 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2526 && CONST_INT_P (XEXP (opright, 1))
2527 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2528 == GET_MODE_PRECISION (mode)))
2529 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2530 XEXP (SUBREG_REG (opleft), 1));
2532 /* If we have (ior (and X C1) C2), simplify this by making
2533 C1 as small as possible if C1 actually changes. */
2534 if (CONST_INT_P (op1)
2535 && (HWI_COMPUTABLE_MODE_P (mode)
2536 || INTVAL (op1) > 0)
2537 && GET_CODE (op0) == AND
2538 && CONST_INT_P (XEXP (op0, 1))
2539 && CONST_INT_P (op1)
2540 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2541 return simplify_gen_binary (IOR, mode,
2542 simplify_gen_binary
2543 (AND, mode, XEXP (op0, 0),
2544 GEN_INT (UINTVAL (XEXP (op0, 1))
2545 & ~UINTVAL (op1))),
2546 op1);
2548 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2549 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2550 the PLUS does not affect any of the bits in OP1: then we can do
2551 the IOR as a PLUS and we can associate. This is valid if OP1
2552 can be safely shifted left C bits. */
2553 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2554 && GET_CODE (XEXP (op0, 0)) == PLUS
2555 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2556 && CONST_INT_P (XEXP (op0, 1))
2557 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2559 int count = INTVAL (XEXP (op0, 1));
2560 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2562 if (mask >> count == INTVAL (trueop1)
2563 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2564 return simplify_gen_binary (ASHIFTRT, mode,
2565 plus_constant (mode, XEXP (op0, 0),
2566 mask),
2567 XEXP (op0, 1));
2570 tem = simplify_associative_operation (code, mode, op0, op1);
2571 if (tem)
2572 return tem;
2573 break;
2575 case XOR:
2576 if (trueop1 == CONST0_RTX (mode))
2577 return op0;
2578 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2579 return simplify_gen_unary (NOT, mode, op0, mode);
2580 if (rtx_equal_p (trueop0, trueop1)
2581 && ! side_effects_p (op0)
2582 && GET_MODE_CLASS (mode) != MODE_CC)
2583 return CONST0_RTX (mode);
2585 /* Canonicalize XOR of the most significant bit to PLUS. */
2586 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2587 && mode_signbit_p (mode, op1))
2588 return simplify_gen_binary (PLUS, mode, op0, op1);
2589 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2590 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2591 && GET_CODE (op0) == PLUS
2592 && (CONST_INT_P (XEXP (op0, 1))
2593 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2594 && mode_signbit_p (mode, XEXP (op0, 1)))
2595 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2596 simplify_gen_binary (XOR, mode, op1,
2597 XEXP (op0, 1)));
2599 /* If we are XORing two things that have no bits in common,
2600 convert them into an IOR. This helps to detect rotation encoded
2601 using those methods and possibly other simplifications. */
2603 if (HWI_COMPUTABLE_MODE_P (mode)
2604 && (nonzero_bits (op0, mode)
2605 & nonzero_bits (op1, mode)) == 0)
2606 return (simplify_gen_binary (IOR, mode, op0, op1));
2608 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2609 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2610 (NOT y). */
2612 int num_negated = 0;
2614 if (GET_CODE (op0) == NOT)
2615 num_negated++, op0 = XEXP (op0, 0);
2616 if (GET_CODE (op1) == NOT)
2617 num_negated++, op1 = XEXP (op1, 0);
2619 if (num_negated == 2)
2620 return simplify_gen_binary (XOR, mode, op0, op1);
2621 else if (num_negated == 1)
2622 return simplify_gen_unary (NOT, mode,
2623 simplify_gen_binary (XOR, mode, op0, op1),
2624 mode);
2627 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2628 correspond to a machine insn or result in further simplifications
2629 if B is a constant. */
2631 if (GET_CODE (op0) == AND
2632 && rtx_equal_p (XEXP (op0, 1), op1)
2633 && ! side_effects_p (op1))
2634 return simplify_gen_binary (AND, mode,
2635 simplify_gen_unary (NOT, mode,
2636 XEXP (op0, 0), mode),
2637 op1);
2639 else if (GET_CODE (op0) == AND
2640 && rtx_equal_p (XEXP (op0, 0), op1)
2641 && ! side_effects_p (op1))
2642 return simplify_gen_binary (AND, mode,
2643 simplify_gen_unary (NOT, mode,
2644 XEXP (op0, 1), mode),
2645 op1);
2647 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2648 we can transform like this:
2649 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2650 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2651 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2652 Attempt a few simplifications when B and C are both constants. */
2653 if (GET_CODE (op0) == AND
2654 && CONST_INT_P (op1)
2655 && CONST_INT_P (XEXP (op0, 1)))
2657 rtx a = XEXP (op0, 0);
2658 rtx b = XEXP (op0, 1);
2659 rtx c = op1;
2660 HOST_WIDE_INT bval = INTVAL (b);
2661 HOST_WIDE_INT cval = INTVAL (c);
2663 rtx na_c
2664 = simplify_binary_operation (AND, mode,
2665 simplify_gen_unary (NOT, mode, a, mode),
2666 c);
2667 if ((~cval & bval) == 0)
2669 /* Try to simplify ~A&C | ~B&C. */
2670 if (na_c != NULL_RTX)
2671 return simplify_gen_binary (IOR, mode, na_c,
2672 GEN_INT (~bval & cval));
2674 else
2676 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2677 if (na_c == const0_rtx)
2679 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2680 GEN_INT (~cval & bval));
2681 return simplify_gen_binary (IOR, mode, a_nc_b,
2682 GEN_INT (~bval & cval));
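/* Sanity check of the first branch (values chosen for illustration):
   with B == 0x3 and C == 0xf we have (~C & B) == 0, and the identity
   (A & 0x3) ^ 0xf == ((~A) & 0xf) | 0xc holds; e.g. A == 0x5 gives 0xe
   on both sides.  The IOR is emitted only when the inner
   (and (not A) C) already simplifies to a non-null na_c.  */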
2687 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2688 comparison if STORE_FLAG_VALUE is 1. */
2689 if (STORE_FLAG_VALUE == 1
2690 && trueop1 == const1_rtx
2691 && COMPARISON_P (op0)
2692 && (reversed = reversed_comparison (op0, mode)))
2693 return reversed;
2695 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2696 is (lt foo (const_int 0)), so we can perform the above
2697 simplification if STORE_FLAG_VALUE is 1. */
2699 if (STORE_FLAG_VALUE == 1
2700 && trueop1 == const1_rtx
2701 && GET_CODE (op0) == LSHIFTRT
2702 && CONST_INT_P (XEXP (op0, 1))
2703 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2704 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
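/* Check of the rewrite above (QImode assumed for illustration): with
   foo == 0x90 the logical shift right by 7 yields 1 and the XOR with 1
   yields 0, matching (ge foo 0) since 0x90 is negative as a signed
   byte; foo == 0x10 yields 1 on both sides.  */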
2706 /* (xor (comparison foo bar) (const_int sign-bit))
2707 when STORE_FLAG_VALUE is the sign bit. */
2708 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2709 && trueop1 == const_true_rtx
2710 && COMPARISON_P (op0)
2711 && (reversed = reversed_comparison (op0, mode)))
2712 return reversed;
2714 tem = simplify_associative_operation (code, mode, op0, op1);
2715 if (tem)
2716 return tem;
2717 break;
2719 case AND:
2720 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2721 return trueop1;
2722 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2723 return op0;
2724 if (HWI_COMPUTABLE_MODE_P (mode))
2726 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2727 HOST_WIDE_INT nzop1;
2728 if (CONST_INT_P (trueop1))
2730 HOST_WIDE_INT val1 = INTVAL (trueop1);
2731 /* If we are turning off bits already known off in OP0, we need
2732 not do an AND. */
2733 if ((nzop0 & ~val1) == 0)
2734 return op0;
2736 nzop1 = nonzero_bits (trueop1, mode);
2737 /* If we are clearing all the nonzero bits, the result is zero. */
2738 if ((nzop1 & nzop0) == 0
2739 && !side_effects_p (op0) && !side_effects_p (op1))
2740 return CONST0_RTX (mode);
2742 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2743 && GET_MODE_CLASS (mode) != MODE_CC)
2744 return op0;
2745 /* A & (~A) -> 0 */
2746 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2747 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2748 && ! side_effects_p (op0)
2749 && GET_MODE_CLASS (mode) != MODE_CC)
2750 return CONST0_RTX (mode);
2752 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2753 there are no nonzero bits of C outside of X's mode. */
2754 if ((GET_CODE (op0) == SIGN_EXTEND
2755 || GET_CODE (op0) == ZERO_EXTEND)
2756 && CONST_INT_P (trueop1)
2757 && HWI_COMPUTABLE_MODE_P (mode)
2758 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2759 & UINTVAL (trueop1)) == 0)
2761 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2762 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2763 gen_int_mode (INTVAL (trueop1),
2764 imode));
2765 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2768 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2769 we might be able to further simplify the AND with X and potentially
2770 remove the truncation altogether. */
2771 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2773 rtx x = XEXP (op0, 0);
2774 enum machine_mode xmode = GET_MODE (x);
2775 tem = simplify_gen_binary (AND, xmode, x,
2776 gen_int_mode (INTVAL (trueop1), xmode));
2777 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2780 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2781 if (GET_CODE (op0) == IOR
2782 && CONST_INT_P (trueop1)
2783 && CONST_INT_P (XEXP (op0, 1)))
2785 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2786 return simplify_gen_binary (IOR, mode,
2787 simplify_gen_binary (AND, mode,
2788 XEXP (op0, 0), op1),
2789 gen_int_mode (tmp, mode));
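/* Example of the distribution above (constants chosen for
   illustration): (and (ior A 0x0c) 0x0a) becomes
   (ior (and A 0x0a) 0x08), since 0x0c & 0x0a == 0x08; e.g. A == 0x05
   yields 0x08 on both sides.  */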
2792 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2793 insn (and may simplify more). */
2794 if (GET_CODE (op0) == XOR
2795 && rtx_equal_p (XEXP (op0, 0), op1)
2796 && ! side_effects_p (op1))
2797 return simplify_gen_binary (AND, mode,
2798 simplify_gen_unary (NOT, mode,
2799 XEXP (op0, 1), mode),
2800 op1);
2802 if (GET_CODE (op0) == XOR
2803 && rtx_equal_p (XEXP (op0, 1), op1)
2804 && ! side_effects_p (op1))
2805 return simplify_gen_binary (AND, mode,
2806 simplify_gen_unary (NOT, mode,
2807 XEXP (op0, 0), mode),
2808 op1);
2810 /* Similarly for (~(A ^ B)) & A. */
2811 if (GET_CODE (op0) == NOT
2812 && GET_CODE (XEXP (op0, 0)) == XOR
2813 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2814 && ! side_effects_p (op1))
2815 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2817 if (GET_CODE (op0) == NOT
2818 && GET_CODE (XEXP (op0, 0)) == XOR
2819 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2820 && ! side_effects_p (op1))
2821 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2823 /* Convert (A | B) & A to A. */
2824 if (GET_CODE (op0) == IOR
2825 && (rtx_equal_p (XEXP (op0, 0), op1)
2826 || rtx_equal_p (XEXP (op0, 1), op1))
2827 && ! side_effects_p (XEXP (op0, 0))
2828 && ! side_effects_p (XEXP (op0, 1)))
2829 return op1;
2831 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2832 ((A & N) + B) & M -> (A + B) & M
2833 Similarly if (N & M) == 0,
2834 ((A | N) + B) & M -> (A + B) & M
2835 and for - instead of + and/or ^ instead of |.
2836 Also, if (N & M) == 0, then
2837 (A +- N) & M -> A & M. */
2838 if (CONST_INT_P (trueop1)
2839 && HWI_COMPUTABLE_MODE_P (mode)
2840 && ~UINTVAL (trueop1)
2841 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2842 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2844 rtx pmop[2];
2845 int which;
2847 pmop[0] = XEXP (op0, 0);
2848 pmop[1] = XEXP (op0, 1);
2850 if (CONST_INT_P (pmop[1])
2851 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2852 return simplify_gen_binary (AND, mode, pmop[0], op1);
2854 for (which = 0; which < 2; which++)
2856 tem = pmop[which];
2857 switch (GET_CODE (tem))
2859 case AND:
2860 if (CONST_INT_P (XEXP (tem, 1))
2861 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2862 == UINTVAL (trueop1))
2863 pmop[which] = XEXP (tem, 0);
2864 break;
2865 case IOR:
2866 case XOR:
2867 if (CONST_INT_P (XEXP (tem, 1))
2868 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2869 pmop[which] = XEXP (tem, 0);
2870 break;
2871 default:
2872 break;
2876 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2878 tem = simplify_gen_binary (GET_CODE (op0), mode,
2879 pmop[0], pmop[1]);
2880 return simplify_gen_binary (code, mode, tem, op1);
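/* Instance of the rule quoted above (constants chosen for
   illustration): with M == 0x7 and N == 0xff we have (N & M) == M, so
   ((A & 0xff) + B) & 0x7 is rewritten to (A + B) & 0x7; addition
   modulo 8 depends only on the low three bits of each operand.  */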
2884 /* (and X (ior (not X) Y)) -> (and X Y) */
2885 if (GET_CODE (op1) == IOR
2886 && GET_CODE (XEXP (op1, 0)) == NOT
2887 && op0 == XEXP (XEXP (op1, 0), 0))
2888 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2890 /* (and (ior (not X) Y) X) -> (and X Y) */
2891 if (GET_CODE (op0) == IOR
2892 && GET_CODE (XEXP (op0, 0)) == NOT
2893 && op1 == XEXP (XEXP (op0, 0), 0))
2894 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2896 tem = simplify_associative_operation (code, mode, op0, op1);
2897 if (tem)
2898 return tem;
2899 break;
2901 case UDIV:
2902 /* 0/x is 0 (or x&0 if x has side-effects). */
2903 if (trueop0 == CONST0_RTX (mode))
2905 if (side_effects_p (op1))
2906 return simplify_gen_binary (AND, mode, op1, trueop0);
2907 return trueop0;
2909 /* x/1 is x. */
2910 if (trueop1 == CONST1_RTX (mode))
2911 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2912 /* Convert divide by power of two into shift. */
2913 if (CONST_INT_P (trueop1)
2914 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2915 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2916 break;
2918 case DIV:
2919 /* Handle floating point and integers separately. */
2920 if (SCALAR_FLOAT_MODE_P (mode))
2922 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2923 safe for modes with NaNs, since 0.0 / 0.0 will then be
2924 NaN rather than 0.0. Nor is it safe for modes with signed
2925 zeros, since dividing 0 by a negative number gives -0.0 */
2926 if (trueop0 == CONST0_RTX (mode)
2927 && !HONOR_NANS (mode)
2928 && !HONOR_SIGNED_ZEROS (mode)
2929 && ! side_effects_p (op1))
2930 return op0;
2931 /* x/1.0 is x. */
2932 if (trueop1 == CONST1_RTX (mode)
2933 && !HONOR_SNANS (mode))
2934 return op0;
2936 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2937 && trueop1 != CONST0_RTX (mode))
2939 REAL_VALUE_TYPE d;
2940 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2942 /* x/-1.0 is -x. */
2943 if (REAL_VALUES_EQUAL (d, dconstm1)
2944 && !HONOR_SNANS (mode))
2945 return simplify_gen_unary (NEG, mode, op0, mode);
2947 /* Change FP division by a constant into multiplication.
2948 Only do this with -freciprocal-math. */
2949 if (flag_reciprocal_math
2950 && !REAL_VALUES_EQUAL (d, dconst0))
2952 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2953 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2954 return simplify_gen_binary (MULT, mode, op0, tem);
2958 else if (SCALAR_INT_MODE_P (mode))
2960 /* 0/x is 0 (or x&0 if x has side-effects). */
2961 if (trueop0 == CONST0_RTX (mode)
2962 && !cfun->can_throw_non_call_exceptions)
2964 if (side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode, op1, trueop0);
2966 return trueop0;
2968 /* x/1 is x. */
2969 if (trueop1 == CONST1_RTX (mode))
2970 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2971 /* x/-1 is -x. */
2972 if (trueop1 == constm1_rtx)
2974 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2975 return simplify_gen_unary (NEG, mode, x, mode);
2978 break;
2980 case UMOD:
2981 /* 0%x is 0 (or x&0 if x has side-effects). */
2982 if (trueop0 == CONST0_RTX (mode))
2984 if (side_effects_p (op1))
2985 return simplify_gen_binary (AND, mode, op1, trueop0);
2986 return trueop0;
2988 /* x%1 is 0 (or x&0 if x has side-effects). */
2989 if (trueop1 == CONST1_RTX (mode))
2991 if (side_effects_p (op0))
2992 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2993 return CONST0_RTX (mode);
2995 /* Implement modulus by power of two as AND. */
2996 if (CONST_INT_P (trueop1)
2997 && exact_log2 (UINTVAL (trueop1)) > 0)
2998 return simplify_gen_binary (AND, mode, op0,
2999 GEN_INT (INTVAL (op1) - 1));
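/* For example (illustrative), an unsigned x % 8 satisfies
   exact_log2 (8) == 3 > 0 and is rewritten as (and x 7).  */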
3000 break;
3002 case MOD:
3003 /* 0%x is 0 (or x&0 if x has side-effects). */
3004 if (trueop0 == CONST0_RTX (mode))
3006 if (side_effects_p (op1))
3007 return simplify_gen_binary (AND, mode, op1, trueop0);
3008 return trueop0;
3010 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3011 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3013 if (side_effects_p (op0))
3014 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3015 return CONST0_RTX (mode);
3017 break;
3019 case ROTATERT:
3020 case ROTATE:
3021 case ASHIFTRT:
3022 if (trueop1 == CONST0_RTX (mode))
3023 return op0;
3024 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3025 return op0;
3026 /* Rotating ~0 always results in ~0. */
3027 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3028 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3029 && ! side_effects_p (op1))
3030 return op0;
3031 canonicalize_shift:
3032 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3034 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3035 if (val != INTVAL (op1))
3036 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3038 break;
3040 case ASHIFT:
3041 case SS_ASHIFT:
3042 case US_ASHIFT:
3043 if (trueop1 == CONST0_RTX (mode))
3044 return op0;
3045 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3046 return op0;
3047 goto canonicalize_shift;
3049 case LSHIFTRT:
3050 if (trueop1 == CONST0_RTX (mode))
3051 return op0;
3052 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3053 return op0;
3054 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3055 if (GET_CODE (op0) == CLZ
3056 && CONST_INT_P (trueop1)
3057 && STORE_FLAG_VALUE == 1
3058 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3060 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3061 unsigned HOST_WIDE_INT zero_val = 0;
3063 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3064 && zero_val == GET_MODE_PRECISION (imode)
3065 && INTVAL (trueop1) == exact_log2 (zero_val))
3066 return simplify_gen_relational (EQ, mode, imode,
3067 XEXP (op0, 0), const0_rtx);
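/* Rationale with 32-bit operands (illustrative): CLZ returns 0..31 for
   nonzero inputs and, on targets where CLZ_DEFINED_VALUE_AT_ZERO gives
   32, returns 32 only for zero; shifting right by 5 therefore produces
   1 exactly when the input is zero, i.e. (eq X 0).  */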
3069 goto canonicalize_shift;
3071 case SMIN:
3072 if (width <= HOST_BITS_PER_WIDE_INT
3073 && mode_signbit_p (mode, trueop1)
3074 && ! side_effects_p (op0))
3075 return op1;
3076 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3077 return op0;
3078 tem = simplify_associative_operation (code, mode, op0, op1);
3079 if (tem)
3080 return tem;
3081 break;
3083 case SMAX:
3084 if (width <= HOST_BITS_PER_WIDE_INT
3085 && CONST_INT_P (trueop1)
3086 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3087 && ! side_effects_p (op0))
3088 return op1;
3089 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3090 return op0;
3091 tem = simplify_associative_operation (code, mode, op0, op1);
3092 if (tem)
3093 return tem;
3094 break;
3096 case UMIN:
3097 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3098 return op1;
3099 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3100 return op0;
3101 tem = simplify_associative_operation (code, mode, op0, op1);
3102 if (tem)
3103 return tem;
3104 break;
3106 case UMAX:
3107 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3108 return op1;
3109 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3110 return op0;
3111 tem = simplify_associative_operation (code, mode, op0, op1);
3112 if (tem)
3113 return tem;
3114 break;
3116 case SS_PLUS:
3117 case US_PLUS:
3118 case SS_MINUS:
3119 case US_MINUS:
3120 case SS_MULT:
3121 case US_MULT:
3122 case SS_DIV:
3123 case US_DIV:
3124 /* ??? There are simplifications that can be done. */
3125 return 0;
3127 case VEC_SELECT:
3128 if (!VECTOR_MODE_P (mode))
3130 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3131 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3132 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3133 gcc_assert (XVECLEN (trueop1, 0) == 1);
3134 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3136 if (GET_CODE (trueop0) == CONST_VECTOR)
3137 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3138 (trueop1, 0, 0)));
3140 /* Extract a scalar element from a nested VEC_SELECT expression
3141 (with an optional nested VEC_CONCAT expression). Some targets
3142 (i386) extract a scalar element from a vector using a chain of
3143 nested VEC_SELECT expressions. When the input operand is a memory
3144 operand, this operation can be simplified to a simple scalar
3145 load from an offset memory address. */
3146 if (GET_CODE (trueop0) == VEC_SELECT)
3148 rtx op0 = XEXP (trueop0, 0);
3149 rtx op1 = XEXP (trueop0, 1);
3151 enum machine_mode opmode = GET_MODE (op0);
3152 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3153 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3155 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3156 int elem;
3158 rtvec vec;
3159 rtx tmp_op, tmp;
3161 gcc_assert (GET_CODE (op1) == PARALLEL);
3162 gcc_assert (i < n_elts);
3164 /* Select the element pointed to by the nested selector. */
3165 elem = INTVAL (XVECEXP (op1, 0, i));
3167 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3168 if (GET_CODE (op0) == VEC_CONCAT)
3170 rtx op00 = XEXP (op0, 0);
3171 rtx op01 = XEXP (op0, 1);
3173 enum machine_mode mode00, mode01;
3174 int n_elts00, n_elts01;
3176 mode00 = GET_MODE (op00);
3177 mode01 = GET_MODE (op01);
3179 /* Find out number of elements of each operand. */
3180 if (VECTOR_MODE_P (mode00))
3182 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3183 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3185 else
3186 n_elts00 = 1;
3188 if (VECTOR_MODE_P (mode01))
3190 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3191 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3193 else
3194 n_elts01 = 1;
3196 gcc_assert (n_elts == n_elts00 + n_elts01);
3198 /* Select correct operand of VEC_CONCAT
3199 and adjust selector. */
3200 if (elem < n_elts01)
3201 tmp_op = op00;
3202 else
3204 tmp_op = op01;
3205 elem -= n_elts00;
3208 else
3209 tmp_op = op0;
3211 vec = rtvec_alloc (1);
3212 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3214 tmp = gen_rtx_fmt_ee (code, mode,
3215 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3216 return tmp;
3218 if (GET_CODE (trueop0) == VEC_DUPLICATE
3219 && GET_MODE (XEXP (trueop0, 0)) == mode)
3220 return XEXP (trueop0, 0);
3222 else
3224 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3225 gcc_assert (GET_MODE_INNER (mode)
3226 == GET_MODE_INNER (GET_MODE (trueop0)));
3227 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3229 if (GET_CODE (trueop0) == CONST_VECTOR)
3231 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3232 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3233 rtvec v = rtvec_alloc (n_elts);
3234 unsigned int i;
3236 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3237 for (i = 0; i < n_elts; i++)
3239 rtx x = XVECEXP (trueop1, 0, i);
3241 gcc_assert (CONST_INT_P (x));
3242 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3243 INTVAL (x));
3246 return gen_rtx_CONST_VECTOR (mode, v);
3249 /* Recognize the identity. */
3250 if (GET_MODE (trueop0) == mode)
3252 bool maybe_ident = true;
3253 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3255 rtx j = XVECEXP (trueop1, 0, i);
3256 if (!CONST_INT_P (j) || INTVAL (j) != i)
3258 maybe_ident = false;
3259 break;
3262 if (maybe_ident)
3263 return trueop0;
3266 /* If we build {a,b} then permute it, build the result directly. */
3267 if (XVECLEN (trueop1, 0) == 2
3268 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3269 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3270 && GET_CODE (trueop0) == VEC_CONCAT
3271 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3272 && GET_MODE (XEXP (trueop0, 0)) == mode
3273 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3274 && GET_MODE (XEXP (trueop0, 1)) == mode)
3276 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3277 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3278 rtx subop0, subop1;
3280 gcc_assert (i0 < 4 && i1 < 4);
3281 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3282 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3284 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3287 if (XVECLEN (trueop1, 0) == 2
3288 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3289 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3290 && GET_CODE (trueop0) == VEC_CONCAT
3291 && GET_MODE (trueop0) == mode)
3293 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3294 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3295 rtx subop0, subop1;
3297 gcc_assert (i0 < 2 && i1 < 2);
3298 subop0 = XEXP (trueop0, i0);
3299 subop1 = XEXP (trueop0, i1);
3301 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3305 if (XVECLEN (trueop1, 0) == 1
3306 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3307 && GET_CODE (trueop0) == VEC_CONCAT)
3309 rtx vec = trueop0;
3310 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3312 /* Try to find the element in the VEC_CONCAT. */
3313 while (GET_MODE (vec) != mode
3314 && GET_CODE (vec) == VEC_CONCAT)
3316 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3317 if (offset < vec_size)
3318 vec = XEXP (vec, 0);
3319 else
3321 offset -= vec_size;
3322 vec = XEXP (vec, 1);
3324 vec = avoid_constant_pool_reference (vec);
3327 if (GET_MODE (vec) == mode)
3328 return vec;
3331 return 0;
3332 case VEC_CONCAT:
3334 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3335 ? GET_MODE (trueop0)
3336 : GET_MODE_INNER (mode));
3337 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3338 ? GET_MODE (trueop1)
3339 : GET_MODE_INNER (mode));
3341 gcc_assert (VECTOR_MODE_P (mode));
3342 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3343 == GET_MODE_SIZE (mode));
3345 if (VECTOR_MODE_P (op0_mode))
3346 gcc_assert (GET_MODE_INNER (mode)
3347 == GET_MODE_INNER (op0_mode));
3348 else
3349 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3351 if (VECTOR_MODE_P (op1_mode))
3352 gcc_assert (GET_MODE_INNER (mode)
3353 == GET_MODE_INNER (op1_mode));
3354 else
3355 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3357 if ((GET_CODE (trueop0) == CONST_VECTOR
3358 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3359 && (GET_CODE (trueop1) == CONST_VECTOR
3360 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3362 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3363 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3364 rtvec v = rtvec_alloc (n_elts);
3365 unsigned int i;
3366 unsigned in_n_elts = 1;
3368 if (VECTOR_MODE_P (op0_mode))
3369 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3370 for (i = 0; i < n_elts; i++)
3372 if (i < in_n_elts)
3374 if (!VECTOR_MODE_P (op0_mode))
3375 RTVEC_ELT (v, i) = trueop0;
3376 else
3377 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3379 else
3381 if (!VECTOR_MODE_P (op1_mode))
3382 RTVEC_ELT (v, i) = trueop1;
3383 else
3384 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3385 i - in_n_elts);
3389 return gen_rtx_CONST_VECTOR (mode, v);
3392 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3393 if (GET_CODE (trueop0) == VEC_SELECT
3394 && GET_CODE (trueop1) == VEC_SELECT
3395 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3397 rtx par0 = XEXP (trueop0, 1);
3398 rtx par1 = XEXP (trueop1, 1);
3399 int len0 = XVECLEN (par0, 0);
3400 int len1 = XVECLEN (par1, 0);
3401 rtvec vec = rtvec_alloc (len0 + len1);
3402 for (int i = 0; i < len0; i++)
3403 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3404 for (int i = 0; i < len1; i++)
3405 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3406 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3407 gen_rtx_PARALLEL (VOIDmode, vec));
3410 return 0;
3412 default:
3413 gcc_unreachable ();
3416 return 0;
3420 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3421 rtx op0, rtx op1)
3423 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3424 HOST_WIDE_INT val;
3425 unsigned int width = GET_MODE_PRECISION (mode);
3427 if (VECTOR_MODE_P (mode)
3428 && code != VEC_CONCAT
3429 && GET_CODE (op0) == CONST_VECTOR
3430 && GET_CODE (op1) == CONST_VECTOR)
3432 unsigned n_elts = GET_MODE_NUNITS (mode);
3433 enum machine_mode op0mode = GET_MODE (op0);
3434 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3435 enum machine_mode op1mode = GET_MODE (op1);
3436 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3437 rtvec v = rtvec_alloc (n_elts);
3438 unsigned int i;
3440 gcc_assert (op0_n_elts == n_elts);
3441 gcc_assert (op1_n_elts == n_elts);
3442 for (i = 0; i < n_elts; i++)
3444 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3445 CONST_VECTOR_ELT (op0, i),
3446 CONST_VECTOR_ELT (op1, i));
3447 if (!x)
3448 return 0;
3449 RTVEC_ELT (v, i) = x;
3452 return gen_rtx_CONST_VECTOR (mode, v);
3455 if (VECTOR_MODE_P (mode)
3456 && code == VEC_CONCAT
3457 && (CONST_INT_P (op0)
3458 || GET_CODE (op0) == CONST_FIXED
3459 || CONST_DOUBLE_P (op0))
3460 && (CONST_INT_P (op1)
3461 || CONST_DOUBLE_P (op1)
3462 || GET_CODE (op1) == CONST_FIXED))
3464 unsigned n_elts = GET_MODE_NUNITS (mode);
3465 rtvec v = rtvec_alloc (n_elts);
3467 gcc_assert (n_elts >= 2);
3468 if (n_elts == 2)
3470 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3471 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3473 RTVEC_ELT (v, 0) = op0;
3474 RTVEC_ELT (v, 1) = op1;
3476 else
3478 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3479 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3480 unsigned i;
3482 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3483 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3484 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3486 for (i = 0; i < op0_n_elts; ++i)
3487 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3488 for (i = 0; i < op1_n_elts; ++i)
3489 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3492 return gen_rtx_CONST_VECTOR (mode, v);
3495 if (SCALAR_FLOAT_MODE_P (mode)
3496 && CONST_DOUBLE_AS_FLOAT_P (op0)
3497 && CONST_DOUBLE_AS_FLOAT_P (op1)
3498 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3500 if (code == AND
3501 || code == IOR
3502 || code == XOR)
3504 long tmp0[4];
3505 long tmp1[4];
3506 REAL_VALUE_TYPE r;
3507 int i;
3509 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3510 GET_MODE (op0));
3511 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3512 GET_MODE (op1));
3513 for (i = 0; i < 4; i++)
3515 switch (code)
3517 case AND:
3518 tmp0[i] &= tmp1[i];
3519 break;
3520 case IOR:
3521 tmp0[i] |= tmp1[i];
3522 break;
3523 case XOR:
3524 tmp0[i] ^= tmp1[i];
3525 break;
3526 default:
3527 gcc_unreachable ();
3530 real_from_target (&r, tmp0, mode);
3531 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3533 else
3535 REAL_VALUE_TYPE f0, f1, value, result;
3536 bool inexact;
3538 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3539 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3540 real_convert (&f0, mode, &f0);
3541 real_convert (&f1, mode, &f1);
3543 if (HONOR_SNANS (mode)
3544 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3545 return 0;
3547 if (code == DIV
3548 && REAL_VALUES_EQUAL (f1, dconst0)
3549 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3550 return 0;
3552 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3553 && flag_trapping_math
3554 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3556 int s0 = REAL_VALUE_NEGATIVE (f0);
3557 int s1 = REAL_VALUE_NEGATIVE (f1);
3559 switch (code)
3561 case PLUS:
3562 /* Inf + -Inf = NaN plus exception. */
3563 if (s0 != s1)
3564 return 0;
3565 break;
3566 case MINUS:
3567 /* Inf - Inf = NaN plus exception. */
3568 if (s0 == s1)
3569 return 0;
3570 break;
3571 case DIV:
3572 /* Inf / Inf = NaN plus exception. */
3573 return 0;
3574 default:
3575 break;
3579 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3580 && flag_trapping_math
3581 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3582 || (REAL_VALUE_ISINF (f1)
3583 && REAL_VALUES_EQUAL (f0, dconst0))))
3584 /* Inf * 0 = NaN plus exception. */
3585 return 0;
3587 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3588 &f0, &f1);
3589 real_convert (&result, mode, &value);
3591 /* Don't constant fold this floating point operation if
3592 the result has overflowed and flag_trapping_math is set. */
3594 if (flag_trapping_math
3595 && MODE_HAS_INFINITIES (mode)
3596 && REAL_VALUE_ISINF (result)
3597 && !REAL_VALUE_ISINF (f0)
3598 && !REAL_VALUE_ISINF (f1))
3599 /* Overflow plus exception. */
3600 return 0;
3602 /* Don't constant fold this floating point operation if the
3603 result may depend upon the run-time rounding mode and
3604 flag_rounding_math is set, or if GCC's software emulation
3605 is unable to accurately represent the result. */
3607 if ((flag_rounding_math
3608 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3609 && (inexact || !real_identical (&result, &value)))
3610 return NULL_RTX;
3612 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3616 /* We can fold some multi-word operations. */
3617 if (GET_MODE_CLASS (mode) == MODE_INT
3618 && width == HOST_BITS_PER_DOUBLE_INT
3619 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3620 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3622 double_int o0, o1, res, tmp;
3623 bool overflow;
3625 o0 = rtx_to_double_int (op0);
3626 o1 = rtx_to_double_int (op1);
3628 switch (code)
3630 case MINUS:
3631 /* A - B == A + (-B). */
3632 o1 = -o1;
3634 /* Fall through.... */
3636 case PLUS:
3637 res = o0 + o1;
3638 break;
3640 case MULT:
3641 res = o0 * o1;
3642 break;
3644 case DIV:
3645 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3646 &tmp, &overflow);
3647 if (overflow)
3648 return 0;
3649 break;
3651 case MOD:
3652 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3653 &res, &overflow);
3654 if (overflow)
3655 return 0;
3656 break;
3658 case UDIV:
3659 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3660 &tmp, &overflow);
3661 if (overflow)
3662 return 0;
3663 break;
3665 case UMOD:
3666 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3667 &res, &overflow);
3668 if (overflow)
3669 return 0;
3670 break;
3672 case AND:
3673 res = o0 & o1;
3674 break;
3676 case IOR:
3677 res = o0 | o1;
3678 break;
3680 case XOR:
3681 res = o0 ^ o1;
3682 break;
3684 case SMIN:
3685 res = o0.smin (o1);
3686 break;
3688 case SMAX:
3689 res = o0.smax (o1);
3690 break;
3692 case UMIN:
3693 res = o0.umin (o1);
3694 break;
3696 case UMAX:
3697 res = o0.umax (o1);
3698 break;
3700 case LSHIFTRT: case ASHIFTRT:
3701 case ASHIFT:
3702 case ROTATE: case ROTATERT:
3704 unsigned HOST_WIDE_INT cnt;
3706 if (SHIFT_COUNT_TRUNCATED)
3708 o1.high = 0;
3709 o1.low &= GET_MODE_PRECISION (mode) - 1;
3712 if (!o1.fits_uhwi ()
3713 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3714 return 0;
3716 cnt = o1.to_uhwi ();
3717 unsigned short prec = GET_MODE_PRECISION (mode);
3719 if (code == LSHIFTRT || code == ASHIFTRT)
3720 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3721 else if (code == ASHIFT)
3722 res = o0.alshift (cnt, prec);
3723 else if (code == ROTATE)
3724 res = o0.lrotate (cnt, prec);
3725 else /* code == ROTATERT */
3726 res = o0.rrotate (cnt, prec);
3728 break;
3730 default:
3731 return 0;
3734 return immed_double_int_const (res, mode);
3737 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3738 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3740 /* Get the integer argument values in two forms:
3741 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3743 arg0 = INTVAL (op0);
3744 arg1 = INTVAL (op1);
3746 if (width < HOST_BITS_PER_WIDE_INT)
3748 arg0 &= GET_MODE_MASK (mode);
3749 arg1 &= GET_MODE_MASK (mode);
3751 arg0s = arg0;
3752 if (val_signbit_known_set_p (mode, arg0s))
3753 arg0s |= ~GET_MODE_MASK (mode);
3755 arg1s = arg1;
3756 if (val_signbit_known_set_p (mode, arg1s))
3757 arg1s |= ~GET_MODE_MASK (mode);
3759 else
3761 arg0s = arg0;
3762 arg1s = arg1;
3765 /* Compute the value of the arithmetic. */
3767 switch (code)
3769 case PLUS:
3770 val = arg0s + arg1s;
3771 break;
3773 case MINUS:
3774 val = arg0s - arg1s;
3775 break;
3777 case MULT:
3778 val = arg0s * arg1s;
3779 break;
3781 case DIV:
3782 if (arg1s == 0
3783 || ((unsigned HOST_WIDE_INT) arg0s
3784 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3785 && arg1s == -1))
3786 return 0;
3787 val = arg0s / arg1s;
3788 break;
3790 case MOD:
3791 if (arg1s == 0
3792 || ((unsigned HOST_WIDE_INT) arg0s
3793 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3794 && arg1s == -1))
3795 return 0;
3796 val = arg0s % arg1s;
3797 break;
3799 case UDIV:
3800 if (arg1 == 0
3801 || ((unsigned HOST_WIDE_INT) arg0s
3802 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3803 && arg1s == -1))
3804 return 0;
3805 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3806 break;
3808 case UMOD:
3809 if (arg1 == 0
3810 || ((unsigned HOST_WIDE_INT) arg0s
3811 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3812 && arg1s == -1))
3813 return 0;
3814 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3815 break;
3817 case AND:
3818 val = arg0 & arg1;
3819 break;
3821 case IOR:
3822 val = arg0 | arg1;
3823 break;
3825 case XOR:
3826 val = arg0 ^ arg1;
3827 break;
3829 case LSHIFTRT:
3830 case ASHIFT:
3831 case ASHIFTRT:
3832 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3833 the value is in range. We can't return any old value for
3834 out-of-range arguments because either the middle-end (via
3835 shift_truncation_mask) or the back-end might be relying on
3836 target-specific knowledge. Nor can we rely on
3837 shift_truncation_mask, since the shift might not be part of an
3838 ashlM3, lshrM3 or ashrM3 instruction. */
3839 if (SHIFT_COUNT_TRUNCATED)
3840 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3841 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3842 return 0;
3844 val = (code == ASHIFT
3845 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3846 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3848 /* Sign-extend the result for arithmetic right shifts. */
3849 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3850 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3851 break;
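/* Example of the sign-extension step (width == 8 assumed for
   illustration): for arg0 == 0xf0, i.e. -16, shifted right
   arithmetically by 2, the logical shift gives 0x3c and the OR above
   sets bits 6 and up, so gen_int_mode produces 0xfc, i.e. -4 in the
   mode.  */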
3853 case ROTATERT:
3854 if (arg1 < 0)
3855 return 0;
3857 arg1 %= width;
3858 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3859 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3860 break;
3862 case ROTATE:
3863 if (arg1 < 0)
3864 return 0;
3866 arg1 %= width;
3867 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3868 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3869 break;
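/* Worked rotation (width == 8 assumed for illustration): rotating
   arg0 == 0xb1 left by 3 computes (0xb1 << 3) | (0xb1 >> 5); after
   gen_int_mode truncates the result to the mode this is 0x8d, i.e.
   the top three bits have wrapped around to the bottom.  */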
3871 case COMPARE:
3872 /* Do nothing here. */
3873 return 0;
3875 case SMIN:
3876 val = arg0s <= arg1s ? arg0s : arg1s;
3877 break;
3879 case UMIN:
3880 val = ((unsigned HOST_WIDE_INT) arg0
3881 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3882 break;
3884 case SMAX:
3885 val = arg0s > arg1s ? arg0s : arg1s;
3886 break;
3888 case UMAX:
3889 val = ((unsigned HOST_WIDE_INT) arg0
3890 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3891 break;
3893 case SS_PLUS:
3894 case US_PLUS:
3895 case SS_MINUS:
3896 case US_MINUS:
3897 case SS_MULT:
3898 case US_MULT:
3899 case SS_DIV:
3900 case US_DIV:
3901 case SS_ASHIFT:
3902 case US_ASHIFT:
3903 /* ??? There are simplifications that can be done. */
3904 return 0;
3906 default:
3907 gcc_unreachable ();
3910 return gen_int_mode (val, mode);
3913 return NULL_RTX;
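/* Illustrative sketch, added for exposition and not part of this file: the
   folding above is done in host-wide arithmetic and only truncated to the
   target mode at the very end by gen_int_mode.  The hypothetical helper
   below models that behaviour for PLUS in an 8-bit mode using ISO C types.  */
#include <stdint.h>

static int8_t
example_fold_plus_qi (int8_t arg0s, int8_t arg1s)
{
  /* Compute in a wider "host" type, as the code above does with
     HOST_WIDE_INT.  */
  int64_t val = (int64_t) arg0s + (int64_t) arg1s;
  /* Truncate to the 8-bit mode, the way gen_int_mode masks the result
     down to GET_MODE_MASK (mode); e.g. 100 + 100 wraps to -56.  */
  return (int8_t) (uint8_t) (val & 0xff);
}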
3918 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3919 PLUS or MINUS.
3921 Rather than test for specific cases, we do this by a brute-force method
3922 and do all possible simplifications until no more changes occur. Then
3923 we rebuild the operation. */
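/* Added illustration, not part of the original file: a toy model of one
   effect of the pairwise combination step below.  Flattening
   (plus (minus a b) (minus b c)) yields the term list {+a, -b, +b, -c};
   cancelling an operand that occurs with both signs leaves {+a, -c},
   i.e. (minus a c).  The real code calls simplify_binary_operation on
   each pair and can fold much more; the hypothetical sketch below only
   does the cancellation, with plain ints standing in for rtx operands.  */

struct example_term { int id; int neg; };

static int
example_cancel_terms (struct example_term *t, int n)
{
  int i, j, live = n;

  for (i = 0; i < n; i++)
    for (j = i + 1; j < n; j++)
      if (t[i].id != 0 && t[i].id == t[j].id && t[i].neg != t[j].neg)
        {
          /* Mark both terms as gone, the way the real loop stores
             NULL_RTX and repacks the array afterwards.  */
          t[i].id = t[j].id = 0;
          live -= 2;
          break;
        }
  return live;
}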
3925 struct simplify_plus_minus_op_data
3927 rtx op;
3928 short neg;
3931 static bool
3932 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3934 int result;
3936 result = (commutative_operand_precedence (y)
3937 - commutative_operand_precedence (x));
3938 if (result)
3939 return result > 0;
3941 /* Group together equal REGs to do more simplification. */
3942 if (REG_P (x) && REG_P (y))
3943 return REGNO (x) > REGNO (y);
3944 else
3945 return false;
3948 static rtx
3949 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3950 rtx op1)
3952 struct simplify_plus_minus_op_data ops[8];
3953 rtx result, tem;
3954 int n_ops = 2, input_ops = 2;
3955 int changed, n_constants = 0, canonicalized = 0;
3956 int i, j;
3958 memset (ops, 0, sizeof ops);
3960 /* Set up the two operands and then expand them until nothing has been
3961 changed. If we run out of room in our array, give up; this should
3962 almost never happen. */
3964 ops[0].op = op0;
3965 ops[0].neg = 0;
3966 ops[1].op = op1;
3967 ops[1].neg = (code == MINUS);
3971 changed = 0;
3973 for (i = 0; i < n_ops; i++)
3975 rtx this_op = ops[i].op;
3976 int this_neg = ops[i].neg;
3977 enum rtx_code this_code = GET_CODE (this_op);
3979 switch (this_code)
3981 case PLUS:
3982 case MINUS:
3983 if (n_ops == 7)
3984 return NULL_RTX;
3986 ops[n_ops].op = XEXP (this_op, 1);
3987 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3988 n_ops++;
3990 ops[i].op = XEXP (this_op, 0);
3991 input_ops++;
3992 changed = 1;
3993 canonicalized |= this_neg;
3994 break;
3996 case NEG:
3997 ops[i].op = XEXP (this_op, 0);
3998 ops[i].neg = ! this_neg;
3999 changed = 1;
4000 canonicalized = 1;
4001 break;
4003 case CONST:
4004 if (n_ops < 7
4005 && GET_CODE (XEXP (this_op, 0)) == PLUS
4006 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4007 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4009 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4010 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4011 ops[n_ops].neg = this_neg;
4012 n_ops++;
4013 changed = 1;
4014 canonicalized = 1;
4016 break;
4018 case NOT:
4019 /* ~a -> (-a - 1) */
4020 if (n_ops != 7)
4022 ops[n_ops].op = CONSTM1_RTX (mode);
4023 ops[n_ops++].neg = this_neg;
4024 ops[i].op = XEXP (this_op, 0);
4025 ops[i].neg = !this_neg;
4026 changed = 1;
4027 canonicalized = 1;
4029 break;
4031 case CONST_INT:
4032 n_constants++;
4033 if (this_neg)
4035 ops[i].op = neg_const_int (mode, this_op);
4036 ops[i].neg = 0;
4037 changed = 1;
4038 canonicalized = 1;
4040 break;
4042 default:
4043 break;
4047 while (changed);
4049 if (n_constants > 1)
4050 canonicalized = 1;
4052 gcc_assert (n_ops >= 2);
4054 /* If we only have two operands, we can avoid the loops. */
4055 if (n_ops == 2)
4057 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4058 rtx lhs, rhs;
4060 /* Get the two operands. Be careful with the order, especially for
4061 the cases where code == MINUS. */
4062 if (ops[0].neg && ops[1].neg)
4064 lhs = gen_rtx_NEG (mode, ops[0].op);
4065 rhs = ops[1].op;
4067 else if (ops[0].neg)
4069 lhs = ops[1].op;
4070 rhs = ops[0].op;
4072 else
4074 lhs = ops[0].op;
4075 rhs = ops[1].op;
4078 return simplify_const_binary_operation (code, mode, lhs, rhs);
4081 /* Now simplify each pair of operands until nothing changes. */
4084 /* Insertion sort is good enough for an eight-element array. */
4085 for (i = 1; i < n_ops; i++)
4087 struct simplify_plus_minus_op_data save;
4088 j = i - 1;
4089 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4090 continue;
4092 canonicalized = 1;
4093 save = ops[i];
4095 ops[j + 1] = ops[j];
4096 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4097 ops[j + 1] = save;
4100 changed = 0;
4101 for (i = n_ops - 1; i > 0; i--)
4102 for (j = i - 1; j >= 0; j--)
4104 rtx lhs = ops[j].op, rhs = ops[i].op;
4105 int lneg = ops[j].neg, rneg = ops[i].neg;
4107 if (lhs != 0 && rhs != 0)
4109 enum rtx_code ncode = PLUS;
4111 if (lneg != rneg)
4113 ncode = MINUS;
4114 if (lneg)
4115 tem = lhs, lhs = rhs, rhs = tem;
4117 else if (swap_commutative_operands_p (lhs, rhs))
4118 tem = lhs, lhs = rhs, rhs = tem;
4120 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4121 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4123 rtx tem_lhs, tem_rhs;
4125 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4126 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4127 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4129 if (tem && !CONSTANT_P (tem))
4130 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4132 else
4133 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4135 /* Reject "simplifications" that just wrap the two
4136 arguments in a CONST. Failure to do so can result
4137 in infinite recursion with simplify_binary_operation
4138 when it calls us to simplify CONST operations. */
4139 if (tem
4140 && ! (GET_CODE (tem) == CONST
4141 && GET_CODE (XEXP (tem, 0)) == ncode
4142 && XEXP (XEXP (tem, 0), 0) == lhs
4143 && XEXP (XEXP (tem, 0), 1) == rhs))
4145 lneg &= rneg;
4146 if (GET_CODE (tem) == NEG)
4147 tem = XEXP (tem, 0), lneg = !lneg;
4148 if (CONST_INT_P (tem) && lneg)
4149 tem = neg_const_int (mode, tem), lneg = 0;
4151 ops[i].op = tem;
4152 ops[i].neg = lneg;
4153 ops[j].op = NULL_RTX;
4154 changed = 1;
4155 canonicalized = 1;
4160 /* If nothing changed, fail. */
4161 if (!canonicalized)
4162 return NULL_RTX;
4164 /* Pack all the operands to the lower-numbered entries. */
4165 for (i = 0, j = 0; j < n_ops; j++)
4166 if (ops[j].op)
4168 ops[i] = ops[j];
4169 i++;
4171 n_ops = i;
4173 while (changed);
4175 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4176 if (n_ops == 2
4177 && CONST_INT_P (ops[1].op)
4178 && CONSTANT_P (ops[0].op)
4179 && ops[0].neg)
4180 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4182 /* We suppressed creation of trivial CONST expressions in the
4183 combination loop to avoid recursion. Create one manually now.
4184 The combination loop should have ensured that there is exactly
4185 one CONST_INT, and the sort will have ensured that it is last
4186 in the array and that any other constant will be next-to-last. */
4188 if (n_ops > 1
4189 && CONST_INT_P (ops[n_ops - 1].op)
4190 && CONSTANT_P (ops[n_ops - 2].op))
4192 rtx value = ops[n_ops - 1].op;
4193 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4194 value = neg_const_int (mode, value);
4195 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4196 INTVAL (value));
4197 n_ops--;
4200 /* Put a non-negated operand first, if possible. */
4202 for (i = 0; i < n_ops && ops[i].neg; i++)
4203 continue;
4204 if (i == n_ops)
4205 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4206 else if (i != 0)
4208 tem = ops[0].op;
4209 ops[0] = ops[i];
4210 ops[i].op = tem;
4211 ops[i].neg = 1;
4214 /* Now make the result by performing the requested operations. */
4215 result = ops[0].op;
4216 for (i = 1; i < n_ops; i++)
4217 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4218 mode, result, ops[i].op);
4220 return result;
4223 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4224 static bool
4225 plus_minus_operand_p (const_rtx x)
4227 return GET_CODE (x) == PLUS
4228 || GET_CODE (x) == MINUS
4229 || (GET_CODE (x) == CONST
4230 && GET_CODE (XEXP (x, 0)) == PLUS
4231 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4232 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4235 /* Like simplify_binary_operation except used for relational operators.
4236 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4237 not also be VOIDmode.
4239 CMP_MODE specifies the mode in which the comparison is done, so it is
4240 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4241 the operands or, if both are VOIDmode, the operands are compared in
4242 "infinite precision". */
4244 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4245 enum machine_mode cmp_mode, rtx op0, rtx op1)
4247 rtx tem, trueop0, trueop1;
4249 if (cmp_mode == VOIDmode)
4250 cmp_mode = GET_MODE (op0);
4251 if (cmp_mode == VOIDmode)
4252 cmp_mode = GET_MODE (op1);
4254 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4255 if (tem)
4257 if (SCALAR_FLOAT_MODE_P (mode))
4259 if (tem == const0_rtx)
4260 return CONST0_RTX (mode);
4261 #ifdef FLOAT_STORE_FLAG_VALUE
4263 REAL_VALUE_TYPE val;
4264 val = FLOAT_STORE_FLAG_VALUE (mode);
4265 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4267 #else
4268 return NULL_RTX;
4269 #endif
4271 if (VECTOR_MODE_P (mode))
4273 if (tem == const0_rtx)
4274 return CONST0_RTX (mode);
4275 #ifdef VECTOR_STORE_FLAG_VALUE
4277 int i, units;
4278 rtvec v;
4280 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4281 if (val == NULL_RTX)
4282 return NULL_RTX;
4283 if (val == const1_rtx)
4284 return CONST1_RTX (mode);
4286 units = GET_MODE_NUNITS (mode);
4287 v = rtvec_alloc (units);
4288 for (i = 0; i < units; i++)
4289 RTVEC_ELT (v, i) = val;
4290 return gen_rtx_raw_CONST_VECTOR (mode, v);
4292 #else
4293 return NULL_RTX;
4294 #endif
4297 return tem;
4300 /* For the following tests, ensure const0_rtx is op1. */
4301 if (swap_commutative_operands_p (op0, op1)
4302 || (op0 == const0_rtx && op1 != const0_rtx))
4303 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4305 /* If op0 is a compare, extract the comparison arguments from it. */
4306 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4307 return simplify_gen_relational (code, mode, VOIDmode,
4308 XEXP (op0, 0), XEXP (op0, 1));
4310 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4311 || CC0_P (op0))
4312 return NULL_RTX;
4314 trueop0 = avoid_constant_pool_reference (op0);
4315 trueop1 = avoid_constant_pool_reference (op1);
4316 return simplify_relational_operation_1 (code, mode, cmp_mode,
4317 trueop0, trueop1);
4320 /* This part of simplify_relational_operation is only used when CMP_MODE
4321 is not in class MODE_CC (i.e. it is a real comparison).
4323 MODE is the mode of the result, while CMP_MODE specifies the mode
4324 in which the comparison is done, so it is the mode of the operands. */
4326 static rtx
4327 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4328 enum machine_mode cmp_mode, rtx op0, rtx op1)
4330 enum rtx_code op0code = GET_CODE (op0);
4332 if (op1 == const0_rtx && COMPARISON_P (op0))
4334 /* If op0 is a comparison, extract the comparison arguments
4335 from it. */
4336 if (code == NE)
4338 if (GET_MODE (op0) == mode)
4339 return simplify_rtx (op0);
4340 else
4341 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4342 XEXP (op0, 0), XEXP (op0, 1));
4344 else if (code == EQ)
4346 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4347 if (new_code != UNKNOWN)
4348 return simplify_gen_relational (new_code, mode, VOIDmode,
4349 XEXP (op0, 0), XEXP (op0, 1));
4353 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4354 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4355 if ((code == LTU || code == GEU)
4356 && GET_CODE (op0) == PLUS
4357 && CONST_INT_P (XEXP (op0, 1))
4358 && (rtx_equal_p (op1, XEXP (op0, 0))
4359 || rtx_equal_p (op1, XEXP (op0, 1))))
4361 rtx new_cmp
4362 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4363 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4364 cmp_mode, XEXP (op0, 0), new_cmp);
4367 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4368 if ((code == LTU || code == GEU)
4369 && GET_CODE (op0) == PLUS
4370 && rtx_equal_p (op1, XEXP (op0, 1))
4371 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4372 && !rtx_equal_p (op1, XEXP (op0, 0)))
4373 return simplify_gen_relational (code, mode, cmp_mode, op0,
4374 copy_rtx (XEXP (op0, 0)));
4376 if (op1 == const0_rtx)
4378 /* Canonicalize (GTU x 0) as (NE x 0). */
4379 if (code == GTU)
4380 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4381 /* Canonicalize (LEU x 0) as (EQ x 0). */
4382 if (code == LEU)
4383 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4385 else if (op1 == const1_rtx)
4387 switch (code)
4389 case GE:
4390 /* Canonicalize (GE x 1) as (GT x 0). */
4391 return simplify_gen_relational (GT, mode, cmp_mode,
4392 op0, const0_rtx);
4393 case GEU:
4394 /* Canonicalize (GEU x 1) as (NE x 0). */
4395 return simplify_gen_relational (NE, mode, cmp_mode,
4396 op0, const0_rtx);
4397 case LT:
4398 /* Canonicalize (LT x 1) as (LE x 0). */
4399 return simplify_gen_relational (LE, mode, cmp_mode,
4400 op0, const0_rtx);
4401 case LTU:
4402 /* Canonicalize (LTU x 1) as (EQ x 0). */
4403 return simplify_gen_relational (EQ, mode, cmp_mode,
4404 op0, const0_rtx);
4405 default:
4406 break;
4409 else if (op1 == constm1_rtx)
4411 /* Canonicalize (LE x -1) as (LT x 0). */
4412 if (code == LE)
4413 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4414 /* Canonicalize (GT x -1) as (GE x 0). */
4415 if (code == GT)
4416 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4419 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4420 if ((code == EQ || code == NE)
4421 && (op0code == PLUS || op0code == MINUS)
4422 && CONSTANT_P (op1)
4423 && CONSTANT_P (XEXP (op0, 1))
4424 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4426 rtx x = XEXP (op0, 0);
4427 rtx c = XEXP (op0, 1);
4428 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4429 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4431 /* Detect an infinitely recursive condition, where we oscillate at this
4432 simplification case between:
4433 A + B == C <---> C - B == A,
4434 where A, B, and C are all constants with non-simplifiable expressions,
4435 usually SYMBOL_REFs. */
4436 if (GET_CODE (tem) == invcode
4437 && CONSTANT_P (x)
4438 && rtx_equal_p (c, XEXP (tem, 1)))
4439 return NULL_RTX;
4441 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4444 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4445 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4446 if (code == NE
4447 && op1 == const0_rtx
4448 && GET_MODE_CLASS (mode) == MODE_INT
4449 && cmp_mode != VOIDmode
4450 /* ??? Work-around BImode bugs in the ia64 backend. */
4451 && mode != BImode
4452 && cmp_mode != BImode
4453 && nonzero_bits (op0, cmp_mode) == 1
4454 && STORE_FLAG_VALUE == 1)
4455 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4456 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4457 : lowpart_subreg (mode, op0, cmp_mode);
4459 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4460 if ((code == EQ || code == NE)
4461 && op1 == const0_rtx
4462 && op0code == XOR)
4463 return simplify_gen_relational (code, mode, cmp_mode,
4464 XEXP (op0, 0), XEXP (op0, 1));
4466 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4467 if ((code == EQ || code == NE)
4468 && op0code == XOR
4469 && rtx_equal_p (XEXP (op0, 0), op1)
4470 && !side_effects_p (XEXP (op0, 0)))
4471 return simplify_gen_relational (code, mode, cmp_mode,
4472 XEXP (op0, 1), const0_rtx);
4474 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4475 if ((code == EQ || code == NE)
4476 && op0code == XOR
4477 && rtx_equal_p (XEXP (op0, 1), op1)
4478 && !side_effects_p (XEXP (op0, 1)))
4479 return simplify_gen_relational (code, mode, cmp_mode,
4480 XEXP (op0, 0), const0_rtx);
4482 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4483 if ((code == EQ || code == NE)
4484 && op0code == XOR
4485 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4486 && (CONST_INT_P (XEXP (op0, 1))
4487 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4488 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4489 simplify_gen_binary (XOR, cmp_mode,
4490 XEXP (op0, 1), op1));
4492 if (op0code == POPCOUNT && op1 == const0_rtx)
4493 switch (code)
4495 case EQ:
4496 case LE:
4497 case LEU:
4498 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4499 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4500 XEXP (op0, 0), const0_rtx);
4502 case NE:
4503 case GT:
4504 case GTU:
4505 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4506 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4507 XEXP (op0, 0), const0_rtx);
4509 default:
4510 break;
4513 return NULL_RTX;
4516 enum
4518 CMP_EQ = 1,
4519 CMP_LT = 2,
4520 CMP_GT = 4,
4521 CMP_LTU = 8,
4522 CMP_GTU = 16
4526 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4527 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4528 For KNOWN_RESULT to make sense, it should be either CMP_EQ, or the
4529 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4530 For floating-point comparisons, assume that the operands were ordered. */
4532 static rtx
4533 comparison_result (enum rtx_code code, int known_results)
4535 switch (code)
4537 case EQ:
4538 case UNEQ:
4539 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4540 case NE:
4541 case LTGT:
4542 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4544 case LT:
4545 case UNLT:
4546 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4547 case GE:
4548 case UNGE:
4549 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4551 case GT:
4552 case UNGT:
4553 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4554 case LE:
4555 case UNLE:
4556 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4558 case LTU:
4559 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4560 case GEU:
4561 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4563 case GTU:
4564 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4565 case LEU:
4566 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4568 case ORDERED:
4569 return const_true_rtx;
4570 case UNORDERED:
4571 return const0_rtx;
4572 default:
4573 gcc_unreachable ();
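/* Added illustration, not part of the original file: for the host integers
   -1 and 1 the signed relation is "less than" while the unsigned relation is
   "greater than", so the known-results mask would be CMP_LT | CMP_GTU;
   comparison_result above then maps GE to const0_rtx and GTU to
   const_true_rtx.  The hypothetical helper below builds such a mask for two
   host integers, mirroring what simplify_const_relational_operation does for
   constant operands.  */

static int
example_known_results (long a, long b)
{
  if (a == b)
    return CMP_EQ;
  return (a < b ? CMP_LT : CMP_GT)
         | ((unsigned long) a < (unsigned long) b ? CMP_LTU : CMP_GTU);
}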
4577 /* Check if the given comparison (done in the given MODE) is actually a
4578 tautology or a contradiction.
4579 If no simplification is possible, this function returns zero.
4580 Otherwise, it returns either const_true_rtx or const0_rtx. */
4583 simplify_const_relational_operation (enum rtx_code code,
4584 enum machine_mode mode,
4585 rtx op0, rtx op1)
4587 rtx tem;
4588 rtx trueop0;
4589 rtx trueop1;
4591 gcc_assert (mode != VOIDmode
4592 || (GET_MODE (op0) == VOIDmode
4593 && GET_MODE (op1) == VOIDmode));
4595 /* If op0 is a compare, extract the comparison arguments from it. */
4596 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4598 op1 = XEXP (op0, 1);
4599 op0 = XEXP (op0, 0);
4601 if (GET_MODE (op0) != VOIDmode)
4602 mode = GET_MODE (op0);
4603 else if (GET_MODE (op1) != VOIDmode)
4604 mode = GET_MODE (op1);
4605 else
4606 return 0;
4609 /* We can't simplify MODE_CC values since we don't know what the
4610 actual comparison is. */
4611 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4612 return 0;
4614 /* Make sure the constant is second. */
4615 if (swap_commutative_operands_p (op0, op1))
4617 tem = op0, op0 = op1, op1 = tem;
4618 code = swap_condition (code);
4621 trueop0 = avoid_constant_pool_reference (op0);
4622 trueop1 = avoid_constant_pool_reference (op1);
4624 /* For integer comparisons of A and B maybe we can simplify A - B and can
4625 then simplify a comparison of that with zero. If A and B are both either
4626 a register or a CONST_INT, this can't help; testing for these cases will
4627 prevent infinite recursion here and speed things up.
4629 We can only do this for EQ and NE comparisons; otherwise we may
4630 lose or introduce overflow, which we cannot disregard as undefined
4631 because we do not know the signedness of the operation on either the
4632 left or the right hand side of the comparison. */
4634 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4635 && (code == EQ || code == NE)
4636 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4637 && (REG_P (op1) || CONST_INT_P (trueop1)))
4638 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4639 /* We cannot do this if tem is a nonzero address. */
4640 && ! nonzero_address_p (tem))
4641 return simplify_const_relational_operation (signed_condition (code),
4642 mode, tem, const0_rtx);
4644 if (! HONOR_NANS (mode) && code == ORDERED)
4645 return const_true_rtx;
4647 if (! HONOR_NANS (mode) && code == UNORDERED)
4648 return const0_rtx;
4650 /* For modes without NaNs, if the two operands are equal, we know the
4651 result except if they have side-effects. Even with NaNs we know
4652 the result of unordered comparisons and, if signaling NaNs are
4653 irrelevant, also the result of LT/GT/LTGT. */
4654 if ((! HONOR_NANS (GET_MODE (trueop0))
4655 || code == UNEQ || code == UNLE || code == UNGE
4656 || ((code == LT || code == GT || code == LTGT)
4657 && ! HONOR_SNANS (GET_MODE (trueop0))))
4658 && rtx_equal_p (trueop0, trueop1)
4659 && ! side_effects_p (trueop0))
4660 return comparison_result (code, CMP_EQ);
4662 /* If the operands are floating-point constants, see if we can fold
4663 the result. */
4664 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4665 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4666 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4668 REAL_VALUE_TYPE d0, d1;
4670 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4671 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4673 /* Comparisons are unordered iff at least one of the values is NaN. */
4674 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4675 switch (code)
4677 case UNEQ:
4678 case UNLT:
4679 case UNGT:
4680 case UNLE:
4681 case UNGE:
4682 case NE:
4683 case UNORDERED:
4684 return const_true_rtx;
4685 case EQ:
4686 case LT:
4687 case GT:
4688 case LE:
4689 case GE:
4690 case LTGT:
4691 case ORDERED:
4692 return const0_rtx;
4693 default:
4694 return 0;
4697 return comparison_result (code,
4698 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4699 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4702 /* Otherwise, see if the operands are both integers. */
4703 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4704 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4705 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4707 int width = GET_MODE_PRECISION (mode);
4708 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4709 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4711 /* Get the two words comprising each integer constant. */
4712 if (CONST_DOUBLE_AS_INT_P (trueop0))
4714 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4715 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4717 else
4719 l0u = l0s = INTVAL (trueop0);
4720 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4723 if (CONST_DOUBLE_AS_INT_P (trueop1))
4725 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4726 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4728 else
4730 l1u = l1s = INTVAL (trueop1);
4731 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4734 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4735 we have to sign or zero-extend the values. */
4736 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4738 l0u &= GET_MODE_MASK (mode);
4739 l1u &= GET_MODE_MASK (mode);
4741 if (val_signbit_known_set_p (mode, l0s))
4742 l0s |= ~GET_MODE_MASK (mode);
4744 if (val_signbit_known_set_p (mode, l1s))
4745 l1s |= ~GET_MODE_MASK (mode);
4747 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4748 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4750 if (h0u == h1u && l0u == l1u)
4751 return comparison_result (code, CMP_EQ);
4752 else
4754 int cr;
4755 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4756 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4757 return comparison_result (code, cr);
4761 /* Optimize comparisons with upper and lower bounds. */
4762 if (HWI_COMPUTABLE_MODE_P (mode)
4763 && CONST_INT_P (trueop1))
4765 int sign;
4766 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4767 HOST_WIDE_INT val = INTVAL (trueop1);
4768 HOST_WIDE_INT mmin, mmax;
4770 if (code == GEU
4771 || code == LEU
4772 || code == GTU
4773 || code == LTU)
4774 sign = 0;
4775 else
4776 sign = 1;
4778 /* Get a reduced range if the sign bit is zero. */
4779 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4781 mmin = 0;
4782 mmax = nonzero;
4784 else
4786 rtx mmin_rtx, mmax_rtx;
4787 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4789 mmin = INTVAL (mmin_rtx);
4790 mmax = INTVAL (mmax_rtx);
4791 if (sign)
4793 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4795 mmin >>= (sign_copies - 1);
4796 mmax >>= (sign_copies - 1);
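/* Added example, not in the original source: in a 32-bit mode
   get_mode_bounds yields [-2^31, 2^31 - 1]; if num_sign_bit_copies
   reports 24 copies of the sign bit, shifting both bounds right by 23
   narrows the range to [-256, 255], i.e. the value fits in 9 bits.  */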
4800 switch (code)
4802 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4803 case GEU:
4804 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4805 return const_true_rtx;
4806 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4807 return const0_rtx;
4808 break;
4809 case GE:
4810 if (val <= mmin)
4811 return const_true_rtx;
4812 if (val > mmax)
4813 return const0_rtx;
4814 break;
4816 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4817 case LEU:
4818 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4819 return const_true_rtx;
4820 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4821 return const0_rtx;
4822 break;
4823 case LE:
4824 if (val >= mmax)
4825 return const_true_rtx;
4826 if (val < mmin)
4827 return const0_rtx;
4828 break;
4830 case EQ:
4831 /* x == y is always false for y out of range. */
4832 if (val < mmin || val > mmax)
4833 return const0_rtx;
4834 break;
4836 /* x > y is always false for y >= mmax, always true for y < mmin. */
4837 case GTU:
4838 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4839 return const0_rtx;
4840 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4841 return const_true_rtx;
4842 break;
4843 case GT:
4844 if (val >= mmax)
4845 return const0_rtx;
4846 if (val < mmin)
4847 return const_true_rtx;
4848 break;
4850 /* x < y is always false for y <= mmin, always true for y > mmax. */
4851 case LTU:
4852 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4853 return const0_rtx;
4854 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4855 return const_true_rtx;
4856 break;
4857 case LT:
4858 if (val <= mmin)
4859 return const0_rtx;
4860 if (val > mmax)
4861 return const_true_rtx;
4862 break;
4864 case NE:
4865 /* x != y is always true for y out of range. */
4866 if (val < mmin || val > mmax)
4867 return const_true_rtx;
4868 break;
4870 default:
4871 break;
4875 /* Optimize integer comparisons with zero. */
4876 if (trueop1 == const0_rtx)
4878 /* Some addresses are known to be nonzero. We don't know
4879 their sign, but equality comparisons are known. */
4880 if (nonzero_address_p (trueop0))
4882 if (code == EQ || code == LEU)
4883 return const0_rtx;
4884 if (code == NE || code == GTU)
4885 return const_true_rtx;
4888 /* See if the first operand is an IOR with a constant. If so, we
4889 may be able to determine the result of this comparison. */
4890 if (GET_CODE (op0) == IOR)
4892 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4893 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4895 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4896 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4897 && (UINTVAL (inner_const)
4898 & ((unsigned HOST_WIDE_INT) 1
4899 << sign_bitnum)));
4901 switch (code)
4903 case EQ:
4904 case LEU:
4905 return const0_rtx;
4906 case NE:
4907 case GTU:
4908 return const_true_rtx;
4909 case LT:
4910 case LE:
4911 if (has_sign)
4912 return const_true_rtx;
4913 break;
4914 case GT:
4915 case GE:
4916 if (has_sign)
4917 return const0_rtx;
4918 break;
4919 default:
4920 break;
4926 /* Optimize comparison of ABS with zero. */
4927 if (trueop1 == CONST0_RTX (mode)
4928 && (GET_CODE (trueop0) == ABS
4929 || (GET_CODE (trueop0) == FLOAT_EXTEND
4930 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4932 switch (code)
4934 case LT:
4935 /* Optimize abs(x) < 0.0. */
4936 if (!HONOR_SNANS (mode)
4937 && (!INTEGRAL_MODE_P (mode)
4938 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4940 if (INTEGRAL_MODE_P (mode)
4941 && (issue_strict_overflow_warning
4942 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4943 warning (OPT_Wstrict_overflow,
4944 ("assuming signed overflow does not occur when "
4945 "assuming abs (x) < 0 is false"));
4946 return const0_rtx;
4948 break;
4950 case GE:
4951 /* Optimize abs(x) >= 0.0. */
4952 if (!HONOR_NANS (mode)
4953 && (!INTEGRAL_MODE_P (mode)
4954 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4956 if (INTEGRAL_MODE_P (mode)
4957 && (issue_strict_overflow_warning
4958 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4959 warning (OPT_Wstrict_overflow,
4960 ("assuming signed overflow does not occur when "
4961 "assuming abs (x) >= 0 is true"));
4962 return const_true_rtx;
4964 break;
4966 case UNGE:
4967 /* Optimize ! (abs(x) < 0.0). */
4968 return const_true_rtx;
4970 default:
4971 break;
4975 return 0;
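/* Illustrative sketch, added for exposition and not part of this file: the
   bounds logic above, modelled for a 32-bit unsigned value whose
   nonzero_bits show it fits in its low 8 bits, so mmin == 0 and mmax == 255.
   The hypothetical helper decides (gtu x val) the same way the GTU case
   above does.  */

static int
example_fold_gtu (unsigned int val)
{
  unsigned int mmin = 0, mmax = 255;	/* bounds implied by nonzero_bits */

  if (val >= mmax)
    return 0;	/* known false, like returning const0_rtx */
  if (val < mmin)
    return 1;	/* known true, like returning const_true_rtx
		   (never taken here since mmin is 0) */
  return -1;	/* not decidable at compile time */
}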
4978 /* Simplify CODE, an operation with result mode MODE and three operands,
4979 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4980 a constant. Return 0 if no simplification is possible. */
4983 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4984 enum machine_mode op0_mode, rtx op0, rtx op1,
4985 rtx op2)
4987 unsigned int width = GET_MODE_PRECISION (mode);
4988 bool any_change = false;
4989 rtx tem;
4991 /* VOIDmode means "infinite" precision. */
4992 if (width == 0)
4993 width = HOST_BITS_PER_WIDE_INT;
4995 switch (code)
4997 case FMA:
4998 /* Simplify negations around the multiplication. */
4999 /* -a * -b + c => a * b + c. */
5000 if (GET_CODE (op0) == NEG)
5002 tem = simplify_unary_operation (NEG, mode, op1, mode);
5003 if (tem)
5004 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5006 else if (GET_CODE (op1) == NEG)
5008 tem = simplify_unary_operation (NEG, mode, op0, mode);
5009 if (tem)
5010 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5013 /* Canonicalize the two multiplication operands. */
5014 /* a * -b + c => -b * a + c. */
5015 if (swap_commutative_operands_p (op0, op1))
5016 tem = op0, op0 = op1, op1 = tem, any_change = true;
5018 if (any_change)
5019 return gen_rtx_FMA (mode, op0, op1, op2);
5020 return NULL_RTX;
5022 case SIGN_EXTRACT:
5023 case ZERO_EXTRACT:
5024 if (CONST_INT_P (op0)
5025 && CONST_INT_P (op1)
5026 && CONST_INT_P (op2)
5027 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5028 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5030 /* Extracting a bit-field from a constant */
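/* Added worked example, not in the original source: for op0 == 0xb6 with a
   field of 4 bits (op1) at bit position 2 (op2) and !BITS_BIG_ENDIAN, the
   shift below gives 0x2d and masking with 0xf leaves 0xd, so ZERO_EXTRACT
   yields 13; SIGN_EXTRACT sees bit 3 of the field set, ORs in the high bits
   and yields -3.  */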
5031 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5032 HOST_WIDE_INT op1val = INTVAL (op1);
5033 HOST_WIDE_INT op2val = INTVAL (op2);
5034 if (BITS_BIG_ENDIAN)
5035 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5036 else
5037 val >>= op2val;
5039 if (HOST_BITS_PER_WIDE_INT != op1val)
5041 /* First zero-extend. */
5042 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5043 /* If desired, propagate sign bit. */
5044 if (code == SIGN_EXTRACT
5045 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5046 != 0)
5047 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5050 return gen_int_mode (val, mode);
5052 break;
5054 case IF_THEN_ELSE:
5055 if (CONST_INT_P (op0))
5056 return op0 != const0_rtx ? op1 : op2;
5058 /* Convert c ? a : a into "a". */
5059 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5060 return op1;
5062 /* Convert a != b ? a : b into "a". */
5063 if (GET_CODE (op0) == NE
5064 && ! side_effects_p (op0)
5065 && ! HONOR_NANS (mode)
5066 && ! HONOR_SIGNED_ZEROS (mode)
5067 && ((rtx_equal_p (XEXP (op0, 0), op1)
5068 && rtx_equal_p (XEXP (op0, 1), op2))
5069 || (rtx_equal_p (XEXP (op0, 0), op2)
5070 && rtx_equal_p (XEXP (op0, 1), op1))))
5071 return op1;
5073 /* Convert a == b ? a : b into "b". */
5074 if (GET_CODE (op0) == EQ
5075 && ! side_effects_p (op0)
5076 && ! HONOR_NANS (mode)
5077 && ! HONOR_SIGNED_ZEROS (mode)
5078 && ((rtx_equal_p (XEXP (op0, 0), op1)
5079 && rtx_equal_p (XEXP (op0, 1), op2))
5080 || (rtx_equal_p (XEXP (op0, 0), op2)
5081 && rtx_equal_p (XEXP (op0, 1), op1))))
5082 return op2;
5084 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5086 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5087 ? GET_MODE (XEXP (op0, 1))
5088 : GET_MODE (XEXP (op0, 0)));
5089 rtx temp;
5091 /* Look for happy constants in op1 and op2. */
5092 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5094 HOST_WIDE_INT t = INTVAL (op1);
5095 HOST_WIDE_INT f = INTVAL (op2);
5097 if (t == STORE_FLAG_VALUE && f == 0)
5098 code = GET_CODE (op0);
5099 else if (t == 0 && f == STORE_FLAG_VALUE)
5101 enum rtx_code tmp;
5102 tmp = reversed_comparison_code (op0, NULL_RTX);
5103 if (tmp == UNKNOWN)
5104 break;
5105 code = tmp;
5107 else
5108 break;
5110 return simplify_gen_relational (code, mode, cmp_mode,
5111 XEXP (op0, 0), XEXP (op0, 1));
5114 if (cmp_mode == VOIDmode)
5115 cmp_mode = op0_mode;
5116 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5117 cmp_mode, XEXP (op0, 0),
5118 XEXP (op0, 1));
5120 /* See if any simplifications were possible. */
5121 if (temp)
5123 if (CONST_INT_P (temp))
5124 return temp == const0_rtx ? op2 : op1;
5125 else if (temp)
5126 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5129 break;
5131 case VEC_MERGE:
5132 gcc_assert (GET_MODE (op0) == mode);
5133 gcc_assert (GET_MODE (op1) == mode);
5134 gcc_assert (VECTOR_MODE_P (mode));
5135 op2 = avoid_constant_pool_reference (op2);
5136 if (CONST_INT_P (op2))
5138 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5139 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5140 int mask = (1 << n_elts) - 1;
5142 if (!(INTVAL (op2) & mask))
5143 return op1;
5144 if ((INTVAL (op2) & mask) == mask)
5145 return op0;
5147 op0 = avoid_constant_pool_reference (op0);
5148 op1 = avoid_constant_pool_reference (op1);
5149 if (GET_CODE (op0) == CONST_VECTOR
5150 && GET_CODE (op1) == CONST_VECTOR)
5152 rtvec v = rtvec_alloc (n_elts);
5153 unsigned int i;
5155 for (i = 0; i < n_elts; i++)
5156 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5157 ? CONST_VECTOR_ELT (op0, i)
5158 : CONST_VECTOR_ELT (op1, i));
5159 return gen_rtx_CONST_VECTOR (mode, v);
5162 break;
5164 default:
5165 gcc_unreachable ();
5168 return 0;
5171 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5172 or CONST_VECTOR,
5173 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5175 Works by unpacking OP into a collection of 8-bit values
5176 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5177 and then repacking them again for OUTERMODE. */
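/* Illustrative sketch, added for exposition and not part of this file: the
   same unpack/select/repack idea used by the routine below, written with
   plain ISO C types, assuming a little-endian layout and 8-bit chunks
   (value_bit == 8).  It extracts the 16-bit "subreg" that starts BYTE bytes
   into a 32-bit value.  */
#include <stdint.h>

static uint16_t
example_immed_subreg_16_from_32 (uint32_t op, unsigned int byte)
{
  unsigned char value[4];
  unsigned int i;

  /* Unpack OP into 8-bit chunks, least-significant byte first.  */
  for (i = 0; i < 4; i++)
    value[i] = (op >> (8 * i)) & 0xff;

  /* Select the requested bytes (BYTE is 0 or 2 for a valid subreg) and
     repack them for the narrower outer mode.  */
  return (uint16_t) (value[byte] | (value[byte + 1] << 8));
}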
5179 static rtx
5180 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5181 enum machine_mode innermode, unsigned int byte)
5183 /* We support up to 512-bit values (for V8DFmode). */
5184 enum {
5185 max_bitsize = 512,
5186 value_bit = 8,
5187 value_mask = (1 << value_bit) - 1
5189 unsigned char value[max_bitsize / value_bit];
5190 int value_start;
5191 int i;
5192 int elem;
5194 int num_elem;
5195 rtx * elems;
5196 int elem_bitsize;
5197 rtx result_s;
5198 rtvec result_v = NULL;
5199 enum mode_class outer_class;
5200 enum machine_mode outer_submode;
5202 /* Some ports misuse CCmode. */
5203 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5204 return op;
5206 /* We have no way to represent a complex constant at the rtl level. */
5207 if (COMPLEX_MODE_P (outermode))
5208 return NULL_RTX;
5210 /* Unpack the value. */
5212 if (GET_CODE (op) == CONST_VECTOR)
5214 num_elem = CONST_VECTOR_NUNITS (op);
5215 elems = &CONST_VECTOR_ELT (op, 0);
5216 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5218 else
5220 num_elem = 1;
5221 elems = &op;
5222 elem_bitsize = max_bitsize;
5224 /* If this asserts, it is too complicated; reducing value_bit may help. */
5225 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5226 /* I don't know how to handle endianness of sub-units. */
5227 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5229 for (elem = 0; elem < num_elem; elem++)
5231 unsigned char * vp;
5232 rtx el = elems[elem];
5234 /* Vectors are kept in target memory order. (This is probably
5235 a mistake.) */
5237 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5238 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5239 / BITS_PER_UNIT);
5240 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5241 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5242 unsigned bytele = (subword_byte % UNITS_PER_WORD
5243 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5244 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5247 switch (GET_CODE (el))
5249 case CONST_INT:
5250 for (i = 0;
5251 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5252 i += value_bit)
5253 *vp++ = INTVAL (el) >> i;
5254 /* CONST_INTs are always logically sign-extended. */
5255 for (; i < elem_bitsize; i += value_bit)
5256 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5257 break;
5259 case CONST_DOUBLE:
5260 if (GET_MODE (el) == VOIDmode)
5262 unsigned char extend = 0;
5263 /* If this triggers, someone should have generated a
5264 CONST_INT instead. */
5265 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5267 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5268 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5269 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5271 *vp++
5272 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5273 i += value_bit;
5276 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5277 extend = -1;
5278 for (; i < elem_bitsize; i += value_bit)
5279 *vp++ = extend;
5281 else
5283 long tmp[max_bitsize / 32];
5284 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5286 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5287 gcc_assert (bitsize <= elem_bitsize);
5288 gcc_assert (bitsize % value_bit == 0);
5290 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5291 GET_MODE (el));
5293 /* real_to_target produces its result in words affected by
5294 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5295 and use WORDS_BIG_ENDIAN instead; see the documentation
5296 of SUBREG in rtl.texi. */
5297 for (i = 0; i < bitsize; i += value_bit)
5299 int ibase;
5300 if (WORDS_BIG_ENDIAN)
5301 ibase = bitsize - 1 - i;
5302 else
5303 ibase = i;
5304 *vp++ = tmp[ibase / 32] >> i % 32;
5307 /* It shouldn't matter what's done here, so fill it with
5308 zero. */
5309 for (; i < elem_bitsize; i += value_bit)
5310 *vp++ = 0;
5312 break;
5314 case CONST_FIXED:
5315 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5317 for (i = 0; i < elem_bitsize; i += value_bit)
5318 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5320 else
5322 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5323 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5324 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5325 i += value_bit)
5326 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5327 >> (i - HOST_BITS_PER_WIDE_INT);
5328 for (; i < elem_bitsize; i += value_bit)
5329 *vp++ = 0;
5331 break;
5333 default:
5334 gcc_unreachable ();
5338 /* Now, pick the right byte to start with. */
5339 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5340 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5341 will already have offset 0. */
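/* Added example, not in the original source: on a fully big-endian target
   with 4-byte words (UNITS_PER_WORD == 4), a (subreg:SI (reg:DI ...) 0)
   names the high half, so with an 8-byte INNERMODE and a 4-byte OUTERMODE
   the code below computes ibyte = 8 - 4 - 0 = 4 and renumbers BYTE to 4,
   i.e. the upper four bytes in least-significant-first numbering.  */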
5342 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5344 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5345 - byte);
5346 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5347 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5348 byte = (subword_byte % UNITS_PER_WORD
5349 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5352 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5353 so if it's become negative it will instead be very large.) */
5354 gcc_assert (byte < GET_MODE_SIZE (innermode));
5356 /* Convert from bytes to chunks of size value_bit. */
5357 value_start = byte * (BITS_PER_UNIT / value_bit);
5359 /* Re-pack the value. */
5361 if (VECTOR_MODE_P (outermode))
5363 num_elem = GET_MODE_NUNITS (outermode);
5364 result_v = rtvec_alloc (num_elem);
5365 elems = &RTVEC_ELT (result_v, 0);
5366 outer_submode = GET_MODE_INNER (outermode);
5368 else
5370 num_elem = 1;
5371 elems = &result_s;
5372 outer_submode = outermode;
5375 outer_class = GET_MODE_CLASS (outer_submode);
5376 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5378 gcc_assert (elem_bitsize % value_bit == 0);
5379 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5381 for (elem = 0; elem < num_elem; elem++)
5383 unsigned char *vp;
5385 /* Vectors are stored in target memory order. (This is probably
5386 a mistake.) */
5388 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5389 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5390 / BITS_PER_UNIT);
5391 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5392 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5393 unsigned bytele = (subword_byte % UNITS_PER_WORD
5394 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5395 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5398 switch (outer_class)
5400 case MODE_INT:
5401 case MODE_PARTIAL_INT:
5403 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5405 for (i = 0;
5406 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5407 i += value_bit)
5408 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5409 for (; i < elem_bitsize; i += value_bit)
5410 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5411 << (i - HOST_BITS_PER_WIDE_INT);
5413 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5414 know why. */
5415 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5416 elems[elem] = gen_int_mode (lo, outer_submode);
5417 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5418 elems[elem] = immed_double_const (lo, hi, outer_submode);
5419 else
5420 return NULL_RTX;
5422 break;
5424 case MODE_FLOAT:
5425 case MODE_DECIMAL_FLOAT:
5427 REAL_VALUE_TYPE r;
5428 long tmp[max_bitsize / 32];
5430 /* real_from_target wants its input in words affected by
5431 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5432 and use WORDS_BIG_ENDIAN instead; see the documentation
5433 of SUBREG in rtl.texi. */
5434 for (i = 0; i < max_bitsize / 32; i++)
5435 tmp[i] = 0;
5436 for (i = 0; i < elem_bitsize; i += value_bit)
5438 int ibase;
5439 if (WORDS_BIG_ENDIAN)
5440 ibase = elem_bitsize - 1 - i;
5441 else
5442 ibase = i;
5443 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5446 real_from_target (&r, tmp, outer_submode);
5447 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5449 break;
5451 case MODE_FRACT:
5452 case MODE_UFRACT:
5453 case MODE_ACCUM:
5454 case MODE_UACCUM:
5456 FIXED_VALUE_TYPE f;
5457 f.data.low = 0;
5458 f.data.high = 0;
5459 f.mode = outer_submode;
5461 for (i = 0;
5462 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5463 i += value_bit)
5464 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5465 for (; i < elem_bitsize; i += value_bit)
5466 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5467 << (i - HOST_BITS_PER_WIDE_INT));
5469 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5471 break;
5473 default:
5474 gcc_unreachable ();
5477 if (VECTOR_MODE_P (outermode))
5478 return gen_rtx_CONST_VECTOR (outermode, result_v);
5479 else
5480 return result_s;
5483 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5484 Return 0 if no simplifications are possible. */
5486 simplify_subreg (enum machine_mode outermode, rtx op,
5487 enum machine_mode innermode, unsigned int byte)
5489 /* Little bit of sanity checking. */
5490 gcc_assert (innermode != VOIDmode);
5491 gcc_assert (outermode != VOIDmode);
5492 gcc_assert (innermode != BLKmode);
5493 gcc_assert (outermode != BLKmode);
5495 gcc_assert (GET_MODE (op) == innermode
5496 || GET_MODE (op) == VOIDmode);
5498 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5499 gcc_assert (byte < GET_MODE_SIZE (innermode));
5501 if (outermode == innermode && !byte)
5502 return op;
5504 if (CONST_INT_P (op)
5505 || CONST_DOUBLE_P (op)
5506 || GET_CODE (op) == CONST_FIXED
5507 || GET_CODE (op) == CONST_VECTOR)
5508 return simplify_immed_subreg (outermode, op, innermode, byte);
5510 /* Changing mode twice with SUBREG => just change it once,
5511 or not at all if changing back to op's starting mode. */
5512 if (GET_CODE (op) == SUBREG)
5514 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5515 int final_offset = byte + SUBREG_BYTE (op);
5516 rtx newx;
5518 if (outermode == innermostmode
5519 && byte == 0 && SUBREG_BYTE (op) == 0)
5520 return SUBREG_REG (op);
5522 /* The SUBREG_BYTE represents the offset, as if the value were stored
5523 in memory. The irritating exception is a paradoxical subreg, where
5524 we define SUBREG_BYTE to be 0; on big-endian machines, this value
5525 should really be negative. For a moment, undo this exception. */
5526 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5528 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5529 if (WORDS_BIG_ENDIAN)
5530 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5531 if (BYTES_BIG_ENDIAN)
5532 final_offset += difference % UNITS_PER_WORD;
5534 if (SUBREG_BYTE (op) == 0
5535 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5537 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5538 if (WORDS_BIG_ENDIAN)
5539 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5540 if (BYTES_BIG_ENDIAN)
5541 final_offset += difference % UNITS_PER_WORD;
5544 /* See whether the resulting subreg will be paradoxical. */
5545 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5547 /* In nonparadoxical subregs we can't handle negative offsets. */
5548 if (final_offset < 0)
5549 return NULL_RTX;
5550 /* Bail out in case resulting subreg would be incorrect. */
5551 if (final_offset % GET_MODE_SIZE (outermode)
5552 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5553 return NULL_RTX;
5555 else
5557 int offset = 0;
5558 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5560 /* In a paradoxical subreg, see if we are still looking at the lower part.
5561 If so, our SUBREG_BYTE will be 0. */
5562 if (WORDS_BIG_ENDIAN)
5563 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5564 if (BYTES_BIG_ENDIAN)
5565 offset += difference % UNITS_PER_WORD;
5566 if (offset == final_offset)
5567 final_offset = 0;
5568 else
5569 return NULL_RTX;
5572 /* Recurse for further possible simplifications. */
5573 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5574 final_offset);
5575 if (newx)
5576 return newx;
5577 if (validate_subreg (outermode, innermostmode,
5578 SUBREG_REG (op), final_offset))
5580 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5581 if (SUBREG_PROMOTED_VAR_P (op)
5582 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5583 && GET_MODE_CLASS (outermode) == MODE_INT
5584 && IN_RANGE (GET_MODE_SIZE (outermode),
5585 GET_MODE_SIZE (innermode),
5586 GET_MODE_SIZE (innermostmode))
5587 && subreg_lowpart_p (newx))
5589 SUBREG_PROMOTED_VAR_P (newx) = 1;
5590 SUBREG_PROMOTED_UNSIGNED_SET
5591 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5593 return newx;
5595 return NULL_RTX;
5598 /* Merge implicit and explicit truncations. */
5600 if (GET_CODE (op) == TRUNCATE
5601 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5602 && subreg_lowpart_offset (outermode, innermode) == byte)
5603 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5604 GET_MODE (XEXP (op, 0)));
5606 /* SUBREG of a hard register => just change the register number
5607 and/or mode. If the hard register is not valid in that mode,
5608 suppress this simplification. If the hard register is the stack,
5609 frame, or argument pointer, leave this as a SUBREG. */
5611 if (REG_P (op) && HARD_REGISTER_P (op))
5613 unsigned int regno, final_regno;
5615 regno = REGNO (op);
5616 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5617 if (HARD_REGISTER_NUM_P (final_regno))
5619 rtx x;
5620 int final_offset = byte;
5622 /* Adjust offset for paradoxical subregs. */
5623 if (byte == 0
5624 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5626 int difference = (GET_MODE_SIZE (innermode)
5627 - GET_MODE_SIZE (outermode));
5628 if (WORDS_BIG_ENDIAN)
5629 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5630 if (BYTES_BIG_ENDIAN)
5631 final_offset += difference % UNITS_PER_WORD;
5634 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5636 /* Propagate original regno. We don't have any way to specify
5637 the offset inside original regno, so do so only for lowpart.
5638 The information is used only by alias analysis, which cannot
5639 grok partial registers anyway. */
5641 if (subreg_lowpart_offset (outermode, innermode) == byte)
5642 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5643 return x;
5647 /* If we have a SUBREG of a register that we are replacing and we are
5648 replacing it with a MEM, make a new MEM and try replacing the
5649 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5650 or if we would be widening it. */
5652 if (MEM_P (op)
5653 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5654 /* Allow splitting of volatile memory references in case we don't
5655 have an instruction to move the whole thing. */
5656 && (! MEM_VOLATILE_P (op)
5657 || ! have_insn_for (SET, innermode))
5658 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5659 return adjust_address_nv (op, outermode, byte);
5661 /* Handle complex values represented as CONCAT
5662 of real and imaginary part. */
5663 if (GET_CODE (op) == CONCAT)
5665 unsigned int part_size, final_offset;
5666 rtx part, res;
5668 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5669 if (byte < part_size)
5671 part = XEXP (op, 0);
5672 final_offset = byte;
5674 else
5676 part = XEXP (op, 1);
5677 final_offset = byte - part_size;
5680 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5681 return NULL_RTX;
5683 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5684 if (res)
5685 return res;
5686 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5687 return gen_rtx_SUBREG (outermode, part, final_offset);
5688 return NULL_RTX;
5691 /* Optimize SUBREG truncations of zero and sign extended values. */
5692 if ((GET_CODE (op) == ZERO_EXTEND
5693 || GET_CODE (op) == SIGN_EXTEND)
5694 && SCALAR_INT_MODE_P (innermode)
5695 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5697 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5699 /* If we're requesting the lowpart of a zero or sign extension,
5700 there are three possibilities. If the outermode is the same
5701 as the origmode, we can omit both the extension and the subreg.
5702 If the outermode is not larger than the origmode, we can apply
5703 the truncation without the extension. Finally, if the outermode
5704 is larger than the origmode, but both are integer modes, we
5705 can just extend to the appropriate mode. */
5706 if (bitpos == 0)
5708 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5709 if (outermode == origmode)
5710 return XEXP (op, 0);
5711 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5712 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5713 subreg_lowpart_offset (outermode,
5714 origmode));
5715 if (SCALAR_INT_MODE_P (outermode))
5716 return simplify_gen_unary (GET_CODE (op), outermode,
5717 XEXP (op, 0), origmode);
5720 /* A SUBREG resulting from a zero extension may fold to zero if
5721 it extracts higher bits than the ZERO_EXTEND's source bits. */
5722 if (GET_CODE (op) == ZERO_EXTEND
5723 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5724 return CONST0_RTX (outermode);
5727 /* Simplify (subreg:SI (op:DI (x:DI) (y:DI)) 0)
5728 to (op:SI (subreg:SI (x:DI) 0) (subreg:SI (y:DI) 0)), where
5729 the outer subreg is effectively a truncation to the original mode. */
5730 if ((GET_CODE (op) == PLUS
5731 || GET_CODE (op) == MINUS
5732 || GET_CODE (op) == MULT)
5733 && SCALAR_INT_MODE_P (outermode)
5734 && SCALAR_INT_MODE_P (innermode)
5735 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5736 && byte == subreg_lowpart_offset (outermode, innermode))
5738 rtx op0 = simplify_gen_subreg (outermode, XEXP (op, 0),
5739 innermode, byte);
5740 if (op0)
5742 rtx op1 = simplify_gen_subreg (outermode, XEXP (op, 1),
5743 innermode, byte);
5744 if (op1)
5745 return simplify_gen_binary (GET_CODE (op), outermode, op0, op1);
5749 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5750 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5751 the outer subreg is effectively a truncation to the original mode. */
5752 if ((GET_CODE (op) == LSHIFTRT
5753 || GET_CODE (op) == ASHIFTRT)
5754 && SCALAR_INT_MODE_P (outermode)
5755 && SCALAR_INT_MODE_P (innermode)
5756 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5757 to avoid the possibility that an outer LSHIFTRT shifts by more
5758 than the sign extension's sign_bit_copies and introduces zeros
5759 into the high bits of the result. */
5760 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5761 && CONST_INT_P (XEXP (op, 1))
5762 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5763 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5764 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5765 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5766 return simplify_gen_binary (ASHIFTRT, outermode,
5767 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5769 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5770 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5771 the outer subreg is effectively a truncation to the original mode. */
5772 if ((GET_CODE (op) == LSHIFTRT
5773 || GET_CODE (op) == ASHIFTRT)
5774 && SCALAR_INT_MODE_P (outermode)
5775 && SCALAR_INT_MODE_P (innermode)
5776 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5777 && CONST_INT_P (XEXP (op, 1))
5778 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5779 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5780 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5781 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5782 return simplify_gen_binary (LSHIFTRT, outermode,
5783 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5785 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5786 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5787 the outer subreg is effectively a truncation to the original mode. */
5788 if (GET_CODE (op) == ASHIFT
5789 && SCALAR_INT_MODE_P (outermode)
5790 && SCALAR_INT_MODE_P (innermode)
5791 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5792 && CONST_INT_P (XEXP (op, 1))
5793 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5794 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5796 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5797 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5798 return simplify_gen_binary (ASHIFT, outermode,
5799 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5801 /* Recognize a word extraction from a multi-word subreg. */
5802 if ((GET_CODE (op) == LSHIFTRT
5803 || GET_CODE (op) == ASHIFTRT)
5804 && SCALAR_INT_MODE_P (innermode)
5805 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5806 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5807 && CONST_INT_P (XEXP (op, 1))
5808 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5809 && INTVAL (XEXP (op, 1)) >= 0
5810 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5811 && byte == subreg_lowpart_offset (outermode, innermode))
5813 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5814 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5815 (WORDS_BIG_ENDIAN
5816 ? byte - shifted_bytes
5817 : byte + shifted_bytes));
5820 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5821 and try replacing the SUBREG and shift with it. Don't do this if
5822 the MEM has a mode-dependent address or if we would be widening it. */
5824 if ((GET_CODE (op) == LSHIFTRT
5825 || GET_CODE (op) == ASHIFTRT)
5826 && SCALAR_INT_MODE_P (innermode)
5827 && MEM_P (XEXP (op, 0))
5828 && CONST_INT_P (XEXP (op, 1))
5829 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5830 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5831 && INTVAL (XEXP (op, 1)) > 0
5832 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5833 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
5834 MEM_ADDR_SPACE (XEXP (op, 0)))
5835 && ! MEM_VOLATILE_P (XEXP (op, 0))
5836 && byte == subreg_lowpart_offset (outermode, innermode)
5837 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5838 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5839 {
5840 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5841 return adjust_address_nv (XEXP (op, 0), outermode,
5842 (WORDS_BIG_ENDIAN
5843 ? byte - shifted_bytes
5844 : byte + shifted_bytes));
5845 }
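/* Illustrative sketch, not from the original source: on a little-endian
   target, if M is a non-volatile MEM:SI whose address is not
   mode-dependent,
     (subreg:HI (lshiftrt:SI M (const_int 16)) 0)
   can be replaced by a new HImode MEM at address M + 2, i.e. the
   upper halfword in memory.  */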
5847 return NULL_RTX;
5848 }
5850 /* Make a SUBREG operation or equivalent if it folds. */
5852 rtx
5853 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5854 enum machine_mode innermode, unsigned int byte)
5855 {
5856 rtx newx;
5858 newx = simplify_subreg (outermode, op, innermode, byte);
5859 if (newx)
5860 return newx;
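/* A SUBREG wrapped around another SUBREG, a CONCAT, or an operand with
   no machine mode would not be valid RTL, so do not try to build one.  */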
5862 if (GET_CODE (op) == SUBREG
5863 || GET_CODE (op) == CONCAT
5864 || GET_MODE (op) == VOIDmode)
5865 return NULL_RTX;
5867 if (validate_subreg (outermode, innermode, op, byte))
5868 return gen_rtx_SUBREG (outermode, op, byte);
5870 return NULL_RTX;
5873 /* Simplify X, an rtx expression.
5875 Return the simplified expression or NULL if no simplifications
5876 were possible.
5878 This is the preferred entry point into the simplification routines;
5879 however, we still allow passes to call the more specific routines.
5881 Right now GCC has three (yes, three) major bodies of RTL simplification
5882 code that need to be unified.
5884 1. fold_rtx in cse.c. This code uses various CSE specific
5885 information to aid in RTL simplification.
5887 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5888 it uses combine specific information to aid in RTL
5889 simplification.
5891 3. The routines in this file.
5894 Long term we want to only have one body of simplification code; to
5895 get to that state I recommend the following steps:
5897 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5898 which do not depend on pass-specific state into these routines.
5900 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5901 use this routine whenever possible.
5903 3. Allow for pass dependent state to be provided to these
5904 routines and add simplifications based on the pass dependent
5905 state. Remove code from cse.c & combine.c that becomes
5906 redundant/dead.
5908 It will take time, but ultimately the compiler will be easier to
5909 maintain and improve. It's totally silly that when we add a
5910 simplification it needs to be added to 4 places (3 for RTL
5911 simplification and 1 for tree simplification).  */
5913 rtx
5914 simplify_rtx (const_rtx x)
5915 {
5916 const enum rtx_code code = GET_CODE (x);
5917 const enum machine_mode mode = GET_MODE (x);
5919 switch (GET_RTX_CLASS (code))
5920 {
5921 case RTX_UNARY:
5922 return simplify_unary_operation (code, mode,
5923 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5924 case RTX_COMM_ARITH:
5925 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5926 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5928 /* Fall through.  */
5930 case RTX_BIN_ARITH:
5931 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5933 case RTX_TERNARY:
5934 case RTX_BITFIELD_OPS:
5935 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5936 XEXP (x, 0), XEXP (x, 1),
5937 XEXP (x, 2));
5939 case RTX_COMPARE:
5940 case RTX_COMM_COMPARE:
5941 return simplify_relational_operation (code, mode,
5942 ((GET_MODE (XEXP (x, 0))
5943 != VOIDmode)
5944 ? GET_MODE (XEXP (x, 0))
5945 : GET_MODE (XEXP (x, 1))),
5946 XEXP (x, 0),
5947 XEXP (x, 1));
5949 case RTX_EXTRA:
5950 if (code == SUBREG)
5951 return simplify_subreg (mode, SUBREG_REG (x),
5952 GET_MODE (SUBREG_REG (x)),
5953 SUBREG_BYTE (x));
5954 break;
5956 case RTX_OBJ:
5957 if (code == LO_SUM)
5958 {
5959 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5960 if (GET_CODE (XEXP (x, 0)) == HIGH
5961 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5962 return XEXP (x, 1);
5963 }
5964 break;
5966 default:
5967 break;
5968 }
5969 return NULL;