gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
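/* Editorial worked example (not in the original source), illustrating the
   (low, high) convention and the HWI_SIGN_EXTEND macro above: with a
   64-bit HOST_WIDE_INT, the value -5 is held as
   low = (unsigned HOST_WIDE_INT) -5 = 0xfffffffffffffffb and
   high = HWI_SIGN_EXTEND (low) = -1, while 5 is held as
   low = 5 and high = HWI_SIGN_EXTEND (5) = 0.  */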
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
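/* Illustrative example (editorial addition): for the QImode constant
   -128, the negation computes 128, which does not fit in QImode;
   gen_int_mode truncates and sign-extends it back to -128, which is the
   overflow case the comment above refers to.  */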
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
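/* Illustrative example (editorial addition): for QImode, whose precision
   is 8, the sign-bit constant is 0x80, so
   mode_signbit_p (QImode, gen_int_mode (128, QImode)) is true while
   mode_signbit_p (QImode, gen_int_mode (64, QImode)) is false.  */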
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 unsigned int width;
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 unsigned int width;
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
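/* Illustrative example (editorial addition): in HImode (precision 16),
   val_signbit_p (HImode, 0x8000) is true,
   val_signbit_known_set_p (HImode, 0x8001) is true (bit 15 is set), and
   val_signbit_known_clear_p (HImode, 0x7fff) is true (bit 15 is clear).  */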
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
170 rtx tem;
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
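/* Usage sketch (editorial addition, with REG standing for some SImode
   pseudo-register rtx): simplify_gen_binary (PLUS, SImode, reg, const0_rtx)
   returns the register rtx itself rather than building a new PLUS,
   because (plus X 0) folds for integer modes; only when no folding is
   possible is a fresh rtx generated, with commutative operands ordered
   canonically.  */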
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
188 avoid_constant_pool_reference (rtx x)
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
194 switch (GET_CODE (x))
196 case MEM:
197 break;
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
205 REAL_VALUE_TYPE d;
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 return x;
212 default:
213 return x;
216 if (GET_MODE (x) == BLKmode)
217 return x;
219 addr = XEXP (x, 0);
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
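/* Illustrative example (editorial addition): if X is a DFmode MEM whose
   address is a SYMBOL_REF into the constant pool holding 2.5, this
   function returns the CONST_DOUBLE for 2.5, so later folding can treat
   the load as a constant.  */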
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
510 break;
512 default:
513 break;
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
530 if (newvec == vec)
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
537 RTVEC_ELT (newvec, j) = op;
540 break;
542 case 'e':
543 if (XEXP (x, i))
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
553 break;
555 return newx;
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
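/* Illustrative example (editorial addition): calling simplify_replace_rtx
   on (plus:SI (reg:SI 100) (const_int 4)) with OLD_RTX = (reg:SI 100) and
   NEW_RTX = (const_int 8) substitutes the register and re-simplifies the
   PLUS, yielding (const_int 12).  */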
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
574 rtx trueop, tem;
576 trueop = avoid_constant_pool_reference (op);
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
582 return simplify_unary_operation_1 (code, mode, op);
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
590 enum rtx_code reversed;
591 rtx temp;
593 switch (code)
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
 651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
701 break;
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
804 else if (STORE_FLAG_VALUE == -1)
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
815 break;
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
873 /* A truncate of a memory is just loading the low part of the memory
874 if we are not changing the meaning of the address. */
875 if (GET_CODE (op) == MEM
876 && !VECTOR_MODE_P (mode)
877 && !MEM_VOLATILE_P (op)
878 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
879 return rtl_hooks.gen_lowpart_no_emit (mode, op);
881 break;
883 case FLOAT_TRUNCATE:
884 if (DECIMAL_FLOAT_MODE_P (mode))
885 break;
887 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
888 if (GET_CODE (op) == FLOAT_EXTEND
889 && GET_MODE (XEXP (op, 0)) == mode)
890 return XEXP (op, 0);
892 /* (float_truncate:SF (float_truncate:DF foo:XF))
893 = (float_truncate:SF foo:XF).
894 This may eliminate double rounding, so it is unsafe.
896 (float_truncate:SF (float_extend:XF foo:DF))
897 = (float_truncate:SF foo:DF).
899 (float_truncate:DF (float_extend:XF foo:SF))
 900 = (float_extend:DF foo:SF). */
901 if ((GET_CODE (op) == FLOAT_TRUNCATE
902 && flag_unsafe_math_optimizations)
903 || GET_CODE (op) == FLOAT_EXTEND)
904 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
905 0)))
906 > GET_MODE_SIZE (mode)
907 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
908 mode,
909 XEXP (op, 0), mode);
911 /* (float_truncate (float x)) is (float x) */
912 if (GET_CODE (op) == FLOAT
913 && (flag_unsafe_math_optimizations
914 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
915 && ((unsigned)significand_size (GET_MODE (op))
916 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
917 - num_sign_bit_copies (XEXP (op, 0),
918 GET_MODE (XEXP (op, 0))))))))
919 return simplify_gen_unary (FLOAT, mode,
920 XEXP (op, 0),
921 GET_MODE (XEXP (op, 0)));
923 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
924 (OP:SF foo:SF) if OP is NEG or ABS. */
925 if ((GET_CODE (op) == ABS
926 || GET_CODE (op) == NEG)
927 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
928 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
929 return simplify_gen_unary (GET_CODE (op), mode,
930 XEXP (XEXP (op, 0), 0), mode);
932 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
933 is (float_truncate:SF x). */
934 if (GET_CODE (op) == SUBREG
935 && subreg_lowpart_p (op)
936 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
937 return SUBREG_REG (op);
938 break;
940 case FLOAT_EXTEND:
941 if (DECIMAL_FLOAT_MODE_P (mode))
942 break;
944 /* (float_extend (float_extend x)) is (float_extend x)
946 (float_extend (float x)) is (float x) assuming that double
 947 rounding can't happen. */
949 if (GET_CODE (op) == FLOAT_EXTEND
950 || (GET_CODE (op) == FLOAT
951 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
952 && ((unsigned)significand_size (GET_MODE (op))
953 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
954 - num_sign_bit_copies (XEXP (op, 0),
955 GET_MODE (XEXP (op, 0)))))))
956 return simplify_gen_unary (GET_CODE (op), mode,
957 XEXP (op, 0),
958 GET_MODE (XEXP (op, 0)));
960 break;
962 case ABS:
963 /* (abs (neg <foo>)) -> (abs <foo>) */
964 if (GET_CODE (op) == NEG)
965 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
966 GET_MODE (XEXP (op, 0)));
968 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
969 do nothing. */
970 if (GET_MODE (op) == VOIDmode)
971 break;
973 /* If operand is something known to be positive, ignore the ABS. */
974 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
975 || val_signbit_known_clear_p (GET_MODE (op),
976 nonzero_bits (op, GET_MODE (op))))
977 return op;
979 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
980 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
981 return gen_rtx_NEG (mode, op);
983 break;
985 case FFS:
986 /* (ffs (*_extend <X>)) = (ffs <X>) */
987 if (GET_CODE (op) == SIGN_EXTEND
988 || GET_CODE (op) == ZERO_EXTEND)
989 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
990 GET_MODE (XEXP (op, 0)));
991 break;
993 case POPCOUNT:
994 switch (GET_CODE (op))
996 case BSWAP:
997 case ZERO_EXTEND:
998 /* (popcount (zero_extend <X>)) = (popcount <X>) */
999 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1000 GET_MODE (XEXP (op, 0)));
1002 case ROTATE:
1003 case ROTATERT:
1004 /* Rotations don't affect popcount. */
1005 if (!side_effects_p (XEXP (op, 1)))
1006 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1007 GET_MODE (XEXP (op, 0)));
1008 break;
1010 default:
1011 break;
1013 break;
1015 case PARITY:
1016 switch (GET_CODE (op))
1018 case NOT:
1019 case BSWAP:
1020 case ZERO_EXTEND:
1021 case SIGN_EXTEND:
1022 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1023 GET_MODE (XEXP (op, 0)));
1025 case ROTATE:
1026 case ROTATERT:
1027 /* Rotations don't affect parity. */
1028 if (!side_effects_p (XEXP (op, 1)))
1029 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1030 GET_MODE (XEXP (op, 0)));
1031 break;
1033 default:
1034 break;
1036 break;
1038 case BSWAP:
1039 /* (bswap (bswap x)) -> x. */
1040 if (GET_CODE (op) == BSWAP)
1041 return XEXP (op, 0);
1042 break;
1044 case FLOAT:
1045 /* (float (sign_extend <X>)) = (float <X>). */
1046 if (GET_CODE (op) == SIGN_EXTEND)
1047 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1048 GET_MODE (XEXP (op, 0)));
1049 break;
1051 case SIGN_EXTEND:
1052 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1053 becomes just the MINUS if its mode is MODE. This allows
1054 folding switch statements on machines using casesi (such as
1055 the VAX). */
1056 if (GET_CODE (op) == TRUNCATE
1057 && GET_MODE (XEXP (op, 0)) == mode
1058 && GET_CODE (XEXP (op, 0)) == MINUS
1059 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1060 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1061 return XEXP (op, 0);
1063 /* Extending a widening multiplication should be canonicalized to
1064 a wider widening multiplication. */
1065 if (GET_CODE (op) == MULT)
1067 rtx lhs = XEXP (op, 0);
1068 rtx rhs = XEXP (op, 1);
1069 enum rtx_code lcode = GET_CODE (lhs);
1070 enum rtx_code rcode = GET_CODE (rhs);
1072 /* Widening multiplies usually extend both operands, but sometimes
1073 they use a shift to extract a portion of a register. */
1074 if ((lcode == SIGN_EXTEND
1075 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1076 && (rcode == SIGN_EXTEND
1077 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1079 enum machine_mode lmode = GET_MODE (lhs);
1080 enum machine_mode rmode = GET_MODE (rhs);
1081 int bits;
1083 if (lcode == ASHIFTRT)
1084 /* Number of bits not shifted off the end. */
1085 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1086 else /* lcode == SIGN_EXTEND */
1087 /* Size of inner mode. */
1088 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1090 if (rcode == ASHIFTRT)
1091 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1092 else /* rcode == SIGN_EXTEND */
1093 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1095 /* We can only widen multiplies if the result is mathematically
1096 equivalent. I.e. if overflow was impossible. */
1097 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1098 return simplify_gen_binary
1099 (MULT, mode,
1100 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1101 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
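/* Worked example (editorial addition): for
   (sign_extend:DI (mult:SI (sign_extend:SI x:HI) (sign_extend:SI y:HI)))
   each inner operand contributes 16 bits, so bits = 32, which does not
   exceed the 32-bit precision of the SImode product; the expression is
   therefore rewritten as
   (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */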
1105 /* Check for a sign extension of a subreg of a promoted
1106 variable, where the promotion is sign-extended, and the
1107 target mode is the same as the variable's promotion. */
1108 if (GET_CODE (op) == SUBREG
1109 && SUBREG_PROMOTED_VAR_P (op)
1110 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1111 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1112 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1114 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1115 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1116 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1118 gcc_assert (GET_MODE_BITSIZE (mode)
1119 > GET_MODE_BITSIZE (GET_MODE (op)));
1120 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1121 GET_MODE (XEXP (op, 0)));
1124 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1125 is (sign_extend:M (subreg:O <X>)) if there is mode with
1126 GET_MODE_BITSIZE (N) - I bits.
1127 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1128 is similarly (zero_extend:M (subreg:O <X>)). */
1129 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1130 && GET_CODE (XEXP (op, 0)) == ASHIFT
1131 && CONST_INT_P (XEXP (op, 1))
1132 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1133 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1135 enum machine_mode tmode
1136 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1137 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1138 gcc_assert (GET_MODE_BITSIZE (mode)
1139 > GET_MODE_BITSIZE (GET_MODE (op)));
1140 if (tmode != BLKmode)
1142 rtx inner =
1143 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1144 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1145 ? SIGN_EXTEND : ZERO_EXTEND,
1146 mode, inner, tmode);
1150 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1151 /* As we do not know which address space the pointer is referring to,
1152 we can do this only if the target does not support different pointer
1153 or address modes depending on the address space. */
1154 if (target_default_pointer_address_modes_p ()
1155 && ! POINTERS_EXTEND_UNSIGNED
1156 && mode == Pmode && GET_MODE (op) == ptr_mode
1157 && (CONSTANT_P (op)
1158 || (GET_CODE (op) == SUBREG
1159 && REG_P (SUBREG_REG (op))
1160 && REG_POINTER (SUBREG_REG (op))
1161 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1162 return convert_memory_address (Pmode, op);
1163 #endif
1164 break;
1166 case ZERO_EXTEND:
1167 /* Check for a zero extension of a subreg of a promoted
1168 variable, where the promotion is zero-extended, and the
1169 target mode is the same as the variable's promotion. */
1170 if (GET_CODE (op) == SUBREG
1171 && SUBREG_PROMOTED_VAR_P (op)
1172 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1173 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1174 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1176 /* Extending a widening multiplication should be canonicalized to
1177 a wider widening multiplication. */
1178 if (GET_CODE (op) == MULT)
1180 rtx lhs = XEXP (op, 0);
1181 rtx rhs = XEXP (op, 1);
1182 enum rtx_code lcode = GET_CODE (lhs);
1183 enum rtx_code rcode = GET_CODE (rhs);
1185 /* Widening multiplies usually extend both operands, but sometimes
1186 they use a shift to extract a portion of a register. */
1187 if ((lcode == ZERO_EXTEND
1188 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1189 && (rcode == ZERO_EXTEND
1190 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1192 enum machine_mode lmode = GET_MODE (lhs);
1193 enum machine_mode rmode = GET_MODE (rhs);
1194 int bits;
1196 if (lcode == LSHIFTRT)
1197 /* Number of bits not shifted off the end. */
1198 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1199 else /* lcode == ZERO_EXTEND */
1200 /* Size of inner mode. */
1201 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1203 if (rcode == LSHIFTRT)
1204 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1205 else /* rcode == ZERO_EXTEND */
1206 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1208 /* We can only widen multiplies if the result is mathematically
1209 equivalent. I.e. if overflow was impossible. */
1210 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1211 return simplify_gen_binary
1212 (MULT, mode,
1213 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1214 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1218 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1219 if (GET_CODE (op) == ZERO_EXTEND)
1220 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1221 GET_MODE (XEXP (op, 0)));
1223 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1224 is (zero_extend:M (subreg:O <X>)) if there is mode with
1225 GET_MODE_BITSIZE (N) - I bits. */
1226 if (GET_CODE (op) == LSHIFTRT
1227 && GET_CODE (XEXP (op, 0)) == ASHIFT
1228 && CONST_INT_P (XEXP (op, 1))
1229 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1230 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1232 enum machine_mode tmode
1233 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1234 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1235 if (tmode != BLKmode)
1237 rtx inner =
1238 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1239 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1243 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1244 /* As we do not know which address space the pointer is referring to,
1245 we can do this only if the target does not support different pointer
1246 or address modes depending on the address space. */
1247 if (target_default_pointer_address_modes_p ()
1248 && POINTERS_EXTEND_UNSIGNED > 0
1249 && mode == Pmode && GET_MODE (op) == ptr_mode
1250 && (CONSTANT_P (op)
1251 || (GET_CODE (op) == SUBREG
1252 && REG_P (SUBREG_REG (op))
1253 && REG_POINTER (SUBREG_REG (op))
1254 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1255 return convert_memory_address (Pmode, op);
1256 #endif
1257 break;
1259 default:
1260 break;
1263 return 0;
1266 /* Try to compute the value of a unary operation CODE whose output mode is to
1267 be MODE with input operand OP whose mode was originally OP_MODE.
1268 Return zero if the value cannot be computed. */
1270 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1271 rtx op, enum machine_mode op_mode)
1273 unsigned int width = GET_MODE_PRECISION (mode);
1274 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1276 if (code == VEC_DUPLICATE)
1278 gcc_assert (VECTOR_MODE_P (mode));
1279 if (GET_MODE (op) != VOIDmode)
1281 if (!VECTOR_MODE_P (GET_MODE (op)))
1282 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1283 else
1284 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1285 (GET_MODE (op)));
1287 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1288 || GET_CODE (op) == CONST_VECTOR)
1290 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1291 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1292 rtvec v = rtvec_alloc (n_elts);
1293 unsigned int i;
1295 if (GET_CODE (op) != CONST_VECTOR)
1296 for (i = 0; i < n_elts; i++)
1297 RTVEC_ELT (v, i) = op;
1298 else
1300 enum machine_mode inmode = GET_MODE (op);
1301 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1302 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1304 gcc_assert (in_n_elts < n_elts);
1305 gcc_assert ((n_elts % in_n_elts) == 0);
1306 for (i = 0; i < n_elts; i++)
1307 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1309 return gen_rtx_CONST_VECTOR (mode, v);
1313 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1315 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1316 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1317 enum machine_mode opmode = GET_MODE (op);
1318 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1319 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1320 rtvec v = rtvec_alloc (n_elts);
1321 unsigned int i;
1323 gcc_assert (op_n_elts == n_elts);
1324 for (i = 0; i < n_elts; i++)
1326 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1327 CONST_VECTOR_ELT (op, i),
1328 GET_MODE_INNER (opmode));
1329 if (!x)
1330 return 0;
1331 RTVEC_ELT (v, i) = x;
1333 return gen_rtx_CONST_VECTOR (mode, v);
1336 /* The order of these tests is critical so that, for example, we don't
1337 check the wrong mode (input vs. output) for a conversion operation,
1338 such as FIX. At some point, this should be simplified. */
1340 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1342 HOST_WIDE_INT hv, lv;
1343 REAL_VALUE_TYPE d;
1345 if (CONST_INT_P (op))
1346 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1347 else
1348 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1350 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1351 d = real_value_truncate (mode, d);
1352 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1354 else if (code == UNSIGNED_FLOAT
1355 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1357 HOST_WIDE_INT hv, lv;
1358 REAL_VALUE_TYPE d;
1360 if (CONST_INT_P (op))
1361 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1362 else
1363 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1365 if (op_mode == VOIDmode
1366 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1367 /* We should never get a negative number. */
1368 gcc_assert (hv >= 0);
1369 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1370 hv = 0, lv &= GET_MODE_MASK (op_mode);
1372 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1373 d = real_value_truncate (mode, d);
1374 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1377 if (CONST_INT_P (op)
1378 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1380 HOST_WIDE_INT arg0 = INTVAL (op);
1381 HOST_WIDE_INT val;
1383 switch (code)
1385 case NOT:
1386 val = ~ arg0;
1387 break;
1389 case NEG:
1390 val = - arg0;
1391 break;
1393 case ABS:
1394 val = (arg0 >= 0 ? arg0 : - arg0);
1395 break;
1397 case FFS:
1398 arg0 &= GET_MODE_MASK (mode);
1399 val = ffs_hwi (arg0);
1400 break;
1402 case CLZ:
1403 arg0 &= GET_MODE_MASK (mode);
1404 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1406 else
1407 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1408 break;
1410 case CLRSB:
1411 arg0 &= GET_MODE_MASK (mode);
1412 if (arg0 == 0)
1413 val = GET_MODE_PRECISION (mode) - 1;
1414 else if (arg0 >= 0)
1415 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1416 else if (arg0 < 0)
1417 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1418 break;
1420 case CTZ:
1421 arg0 &= GET_MODE_MASK (mode);
1422 if (arg0 == 0)
1424 /* Even if the value at zero is undefined, we have to come
1425 up with some replacement. Seems good enough. */
1426 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1427 val = GET_MODE_PRECISION (mode);
1429 else
1430 val = ctz_hwi (arg0);
1431 break;
1433 case POPCOUNT:
1434 arg0 &= GET_MODE_MASK (mode);
1435 val = 0;
1436 while (arg0)
1437 val++, arg0 &= arg0 - 1;
1438 break;
1440 case PARITY:
1441 arg0 &= GET_MODE_MASK (mode);
1442 val = 0;
1443 while (arg0)
1444 val++, arg0 &= arg0 - 1;
1445 val &= 1;
1446 break;
1448 case BSWAP:
1450 unsigned int s;
1452 val = 0;
1453 for (s = 0; s < width; s += 8)
1455 unsigned int d = width - s - 8;
1456 unsigned HOST_WIDE_INT byte;
1457 byte = (arg0 >> s) & 0xff;
1458 val |= byte << d;
1461 break;
1463 case TRUNCATE:
1464 val = arg0;
1465 break;
1467 case ZERO_EXTEND:
1468 /* When zero-extending a CONST_INT, we need to know its
1469 original mode. */
1470 gcc_assert (op_mode != VOIDmode);
1471 if (op_width == HOST_BITS_PER_WIDE_INT)
1473 /* If we were really extending the mode,
1474 we would have to distinguish between zero-extension
1475 and sign-extension. */
1476 gcc_assert (width == op_width);
1477 val = arg0;
1479 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1480 val = arg0 & GET_MODE_MASK (op_mode);
1481 else
1482 return 0;
1483 break;
1485 case SIGN_EXTEND:
1486 if (op_mode == VOIDmode)
1487 op_mode = mode;
1488 op_width = GET_MODE_PRECISION (op_mode);
1489 if (op_width == HOST_BITS_PER_WIDE_INT)
1491 /* If we were really extending the mode,
1492 we would have to distinguish between zero-extension
1493 and sign-extension. */
1494 gcc_assert (width == op_width);
1495 val = arg0;
1497 else if (op_width < HOST_BITS_PER_WIDE_INT)
1499 val = arg0 & GET_MODE_MASK (op_mode);
1500 if (val_signbit_known_set_p (op_mode, val))
1501 val |= ~GET_MODE_MASK (op_mode);
1503 else
1504 return 0;
1505 break;
1507 case SQRT:
1508 case FLOAT_EXTEND:
1509 case FLOAT_TRUNCATE:
1510 case SS_TRUNCATE:
1511 case US_TRUNCATE:
1512 case SS_NEG:
1513 case US_NEG:
1514 case SS_ABS:
1515 return 0;
1517 default:
1518 gcc_unreachable ();
1521 return gen_int_mode (val, mode);
1524 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1525 for a DImode operation on a CONST_INT. */
1526 else if (width <= HOST_BITS_PER_DOUBLE_INT
1527 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1529 double_int first, value;
1531 if (CONST_DOUBLE_AS_INT_P (op))
1532 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1533 CONST_DOUBLE_LOW (op));
1534 else
1535 first = double_int::from_shwi (INTVAL (op));
1537 switch (code)
1539 case NOT:
1540 value = ~first;
1541 break;
1543 case NEG:
1544 value = -first;
1545 break;
1547 case ABS:
1548 if (first.is_negative ())
1549 value = -first;
1550 else
1551 value = first;
1552 break;
1554 case FFS:
1555 value.high = 0;
1556 if (first.low != 0)
1557 value.low = ffs_hwi (first.low);
1558 else if (first.high != 0)
1559 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1560 else
1561 value.low = 0;
1562 break;
1564 case CLZ:
1565 value.high = 0;
1566 if (first.high != 0)
1567 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1568 - HOST_BITS_PER_WIDE_INT;
1569 else if (first.low != 0)
1570 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1571 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1572 value.low = GET_MODE_PRECISION (mode);
1573 break;
1575 case CTZ:
1576 value.high = 0;
1577 if (first.low != 0)
1578 value.low = ctz_hwi (first.low);
1579 else if (first.high != 0)
1580 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1581 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1582 value.low = GET_MODE_PRECISION (mode);
1583 break;
1585 case POPCOUNT:
1586 value = double_int_zero;
1587 while (first.low)
1589 value.low++;
1590 first.low &= first.low - 1;
1592 while (first.high)
1594 value.low++;
1595 first.high &= first.high - 1;
1597 break;
1599 case PARITY:
1600 value = double_int_zero;
1601 while (first.low)
1603 value.low++;
1604 first.low &= first.low - 1;
1606 while (first.high)
1608 value.low++;
1609 first.high &= first.high - 1;
1611 value.low &= 1;
1612 break;
1614 case BSWAP:
1616 unsigned int s;
1618 value = double_int_zero;
1619 for (s = 0; s < width; s += 8)
1621 unsigned int d = width - s - 8;
1622 unsigned HOST_WIDE_INT byte;
1624 if (s < HOST_BITS_PER_WIDE_INT)
1625 byte = (first.low >> s) & 0xff;
1626 else
1627 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1629 if (d < HOST_BITS_PER_WIDE_INT)
1630 value.low |= byte << d;
1631 else
1632 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1635 break;
1637 case TRUNCATE:
1638 /* This is just a change-of-mode, so do nothing. */
1639 value = first;
1640 break;
1642 case ZERO_EXTEND:
1643 gcc_assert (op_mode != VOIDmode);
1645 if (op_width > HOST_BITS_PER_WIDE_INT)
1646 return 0;
1648 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1649 break;
1651 case SIGN_EXTEND:
1652 if (op_mode == VOIDmode
1653 || op_width > HOST_BITS_PER_WIDE_INT)
1654 return 0;
1655 else
1657 value.low = first.low & GET_MODE_MASK (op_mode);
1658 if (val_signbit_known_set_p (op_mode, value.low))
1659 value.low |= ~GET_MODE_MASK (op_mode);
1661 value.high = HWI_SIGN_EXTEND (value.low);
1663 break;
1665 case SQRT:
1666 return 0;
1668 default:
1669 return 0;
1672 return immed_double_int_const (value, mode);
1675 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1676 && SCALAR_FLOAT_MODE_P (mode)
1677 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1679 REAL_VALUE_TYPE d, t;
1680 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1682 switch (code)
1684 case SQRT:
1685 if (HONOR_SNANS (mode) && real_isnan (&d))
1686 return 0;
1687 real_sqrt (&t, mode, &d);
1688 d = t;
1689 break;
1690 case ABS:
1691 d = real_value_abs (&d);
1692 break;
1693 case NEG:
1694 d = real_value_negate (&d);
1695 break;
1696 case FLOAT_TRUNCATE:
1697 d = real_value_truncate (mode, d);
1698 break;
1699 case FLOAT_EXTEND:
1700 /* All this does is change the mode, unless changing
1701 mode class. */
1702 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1703 real_convert (&d, mode, &d);
1704 break;
1705 case FIX:
1706 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1707 break;
1708 case NOT:
1710 long tmp[4];
1711 int i;
1713 real_to_target (tmp, &d, GET_MODE (op));
1714 for (i = 0; i < 4; i++)
1715 tmp[i] = ~tmp[i];
1716 real_from_target (&d, tmp, mode);
1717 break;
1719 default:
1720 gcc_unreachable ();
1722 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1725 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1726 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1727 && GET_MODE_CLASS (mode) == MODE_INT
1728 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1730 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1731 operators are intentionally left unspecified (to ease implementation
1732 by target backends), for consistency, this routine implements the
1733 same semantics for constant folding as used by the middle-end. */
1735 /* This was formerly used only for non-IEEE float.
1736 eggert@twinsun.com says it is safe for IEEE also. */
1737 HOST_WIDE_INT xh, xl, th, tl;
1738 REAL_VALUE_TYPE x, t;
1739 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1740 switch (code)
1742 case FIX:
1743 if (REAL_VALUE_ISNAN (x))
1744 return const0_rtx;
1746 /* Test against the signed upper bound. */
1747 if (width > HOST_BITS_PER_WIDE_INT)
1749 th = ((unsigned HOST_WIDE_INT) 1
1750 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1751 tl = -1;
1753 else
1755 th = 0;
1756 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1758 real_from_integer (&t, VOIDmode, tl, th, 0);
1759 if (REAL_VALUES_LESS (t, x))
1761 xh = th;
1762 xl = tl;
1763 break;
1766 /* Test against the signed lower bound. */
1767 if (width > HOST_BITS_PER_WIDE_INT)
1769 th = (unsigned HOST_WIDE_INT) (-1)
1770 << (width - HOST_BITS_PER_WIDE_INT - 1);
1771 tl = 0;
1773 else
1775 th = -1;
1776 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1778 real_from_integer (&t, VOIDmode, tl, th, 0);
1779 if (REAL_VALUES_LESS (x, t))
1781 xh = th;
1782 xl = tl;
1783 break;
1785 REAL_VALUE_TO_INT (&xl, &xh, x);
1786 break;
1788 case UNSIGNED_FIX:
1789 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1790 return const0_rtx;
1792 /* Test against the unsigned upper bound. */
1793 if (width == HOST_BITS_PER_DOUBLE_INT)
1795 th = -1;
1796 tl = -1;
1798 else if (width >= HOST_BITS_PER_WIDE_INT)
1800 th = ((unsigned HOST_WIDE_INT) 1
1801 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1802 tl = -1;
1804 else
1806 th = 0;
1807 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1809 real_from_integer (&t, VOIDmode, tl, th, 1);
1810 if (REAL_VALUES_LESS (t, x))
1812 xh = th;
1813 xl = tl;
1814 break;
1817 REAL_VALUE_TO_INT (&xl, &xh, x);
1818 break;
1820 default:
1821 gcc_unreachable ();
1823 return immed_double_const (xl, xh, mode);
1826 return NULL_RTX;
1829 /* Subroutine of simplify_binary_operation to simplify a commutative,
1830 associative binary operation CODE with result mode MODE, operating
1831 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1832 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1833 canonicalization is possible. */
1835 static rtx
1836 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1837 rtx op0, rtx op1)
1839 rtx tem;
1841 /* Linearize the operator to the left. */
1842 if (GET_CODE (op1) == code)
1844 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1845 if (GET_CODE (op0) == code)
1847 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1848 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1851 /* "a op (b op c)" becomes "(b op c) op a". */
1852 if (! swap_commutative_operands_p (op1, op0))
1853 return simplify_gen_binary (code, mode, op1, op0);
1855 tem = op0;
1856 op0 = op1;
1857 op1 = tem;
1860 if (GET_CODE (op0) == code)
1862 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1863 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1865 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1866 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1869 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1870 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1871 if (tem != 0)
1872 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1874 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1875 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1876 if (tem != 0)
1877 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1880 return 0;
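/* Illustrative example (editorial addition): given
   (plus:SI (plus:SI (reg:SI 100) (const_int 4)) (const_int 8)),
   OP0 carries the operator code, so the "(a op b) op c" ->
   "a op (b op c)" attempt folds (plus (const_int 4) (const_int 8)) to
   (const_int 12), and the result is
   (plus:SI (reg:SI 100) (const_int 12)).  */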
1884 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1885 and OP1. Return 0 if no simplification is possible.
1887 Don't use this for relational operations such as EQ or LT.
1888 Use simplify_relational_operation instead. */
1890 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1891 rtx op0, rtx op1)
1893 rtx trueop0, trueop1;
1894 rtx tem;
1896 /* Relational operations don't work here. We must know the mode
1897 of the operands in order to do the comparison correctly.
1898 Assuming a full word can give incorrect results.
1899 Consider comparing 128 with -128 in QImode. */
1900 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1901 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1903 /* Make sure the constant is second. */
1904 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1905 && swap_commutative_operands_p (op0, op1))
1907 tem = op0, op0 = op1, op1 = tem;
1910 trueop0 = avoid_constant_pool_reference (op0);
1911 trueop1 = avoid_constant_pool_reference (op1);
1913 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1914 if (tem)
1915 return tem;
1916 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1919 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1920 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1921 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1922 actual constants. */
1924 static rtx
1925 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1926 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1928 rtx tem, reversed, opleft, opright;
1929 HOST_WIDE_INT val;
1930 unsigned int width = GET_MODE_PRECISION (mode);
1932 /* Even if we can't compute a constant result,
1933 there are some cases worth simplifying. */
1935 switch (code)
1937 case PLUS:
1938 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1939 when x is NaN, infinite, or finite and nonzero. They aren't
1940 when x is -0 and the rounding mode is not towards -infinity,
1941 since (-0) + 0 is then 0. */
1942 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1943 return op0;
1945 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1946 transformations are safe even for IEEE. */
1947 if (GET_CODE (op0) == NEG)
1948 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1949 else if (GET_CODE (op1) == NEG)
1950 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1952 /* (~a) + 1 -> -a */
1953 if (INTEGRAL_MODE_P (mode)
1954 && GET_CODE (op0) == NOT
1955 && trueop1 == const1_rtx)
1956 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1958 /* Handle both-operands-constant cases. We can only add
1959 CONST_INTs to constants since the sum of relocatable symbols
1960 can't be handled by most assemblers. Don't add CONST_INT
1961 to CONST_INT since overflow won't be computed properly if wider
1962 than HOST_BITS_PER_WIDE_INT. */
1964 if ((GET_CODE (op0) == CONST
1965 || GET_CODE (op0) == SYMBOL_REF
1966 || GET_CODE (op0) == LABEL_REF)
1967 && CONST_INT_P (op1))
1968 return plus_constant (mode, op0, INTVAL (op1));
1969 else if ((GET_CODE (op1) == CONST
1970 || GET_CODE (op1) == SYMBOL_REF
1971 || GET_CODE (op1) == LABEL_REF)
1972 && CONST_INT_P (op0))
1973 return plus_constant (mode, op1, INTVAL (op0));
1975 /* See if this is something like X * C - X or vice versa or
1976 if the multiplication is written as a shift. If so, we can
1977 distribute and make a new multiply, shift, or maybe just
1978 have X (if C is 2 in the example above). But don't make
1979 something more expensive than we had before. */
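/* For instance, (plus (mult X (const_int 3)) X) becomes (mult X (const_int 4)),
   and (plus (ashift X (const_int 2)) X) becomes (mult X (const_int 5)),
   provided the result is no more costly than the original. */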
1981 if (SCALAR_INT_MODE_P (mode))
1983 double_int coeff0, coeff1;
1984 rtx lhs = op0, rhs = op1;
1986 coeff0 = double_int_one;
1987 coeff1 = double_int_one;
1989 if (GET_CODE (lhs) == NEG)
1991 coeff0 = double_int_minus_one;
1992 lhs = XEXP (lhs, 0);
1994 else if (GET_CODE (lhs) == MULT
1995 && CONST_INT_P (XEXP (lhs, 1)))
1997 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
1998 lhs = XEXP (lhs, 0);
2000 else if (GET_CODE (lhs) == ASHIFT
2001 && CONST_INT_P (XEXP (lhs, 1))
2002 && INTVAL (XEXP (lhs, 1)) >= 0
2003 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2005 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2006 lhs = XEXP (lhs, 0);
2009 if (GET_CODE (rhs) == NEG)
2011 coeff1 = double_int_minus_one;
2012 rhs = XEXP (rhs, 0);
2014 else if (GET_CODE (rhs) == MULT
2015 && CONST_INT_P (XEXP (rhs, 1)))
2017 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2018 rhs = XEXP (rhs, 0);
2020 else if (GET_CODE (rhs) == ASHIFT
2021 && CONST_INT_P (XEXP (rhs, 1))
2022 && INTVAL (XEXP (rhs, 1)) >= 0
2023 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2025 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2026 rhs = XEXP (rhs, 0);
2029 if (rtx_equal_p (lhs, rhs))
2031 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2032 rtx coeff;
2033 double_int val;
2034 bool speed = optimize_function_for_speed_p (cfun);
2036 val = coeff0 + coeff1;
2037 coeff = immed_double_int_const (val, mode);
2039 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2040 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2041 ? tem : 0;
2045 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
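/* For example, in QImode (plus (xor X (const_int 3)) (const_int -128))
   folds to (xor X (const_int -125)). */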
2046 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2047 && GET_CODE (op0) == XOR
2048 && (CONST_INT_P (XEXP (op0, 1))
2049 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2050 && mode_signbit_p (mode, op1))
2051 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2052 simplify_gen_binary (XOR, mode, op1,
2053 XEXP (op0, 1)));
2055 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2056 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2057 && GET_CODE (op0) == MULT
2058 && GET_CODE (XEXP (op0, 0)) == NEG)
2060 rtx in1, in2;
2062 in1 = XEXP (XEXP (op0, 0), 0);
2063 in2 = XEXP (op0, 1);
2064 return simplify_gen_binary (MINUS, mode, op1,
2065 simplify_gen_binary (MULT, mode,
2066 in1, in2));
2069 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2070 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2071 is 1. */
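/* For instance, with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
   becomes (neg (ne A B)). */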
2072 if (COMPARISON_P (op0)
2073 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2074 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2075 && (reversed = reversed_comparison (op0, mode)))
2076 return
2077 simplify_gen_unary (NEG, mode, reversed, mode);
2079 /* If one of the operands is a PLUS or a MINUS, see if we can
2080 simplify this by the associative law.
2081 Don't use the associative law for floating point.
2082 The inaccuracy makes it nonassociative,
2083 and subtle programs can break if operations are associated. */
2085 if (INTEGRAL_MODE_P (mode)
2086 && (plus_minus_operand_p (op0)
2087 || plus_minus_operand_p (op1))
2088 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2089 return tem;
2091 /* Reassociate floating point addition only when the user
2092 specifies associative math operations. */
2093 if (FLOAT_MODE_P (mode)
2094 && flag_associative_math)
2096 tem = simplify_associative_operation (code, mode, op0, op1);
2097 if (tem)
2098 return tem;
2100 break;
2102 case COMPARE:
2103 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2104 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2105 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2106 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2108 rtx xop00 = XEXP (op0, 0);
2109 rtx xop10 = XEXP (op1, 0);
2111 #ifdef HAVE_cc0
2112 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2113 #else
2114 if (REG_P (xop00) && REG_P (xop10)
2115 && GET_MODE (xop00) == GET_MODE (xop10)
2116 && REGNO (xop00) == REGNO (xop10)
2117 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2118 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2119 #endif
2120 return xop00;
2122 break;
2124 case MINUS:
2125 /* We can't assume x-x is 0 even with non-IEEE floating point,
2126 but since it is zero except in very strange circumstances, we
2127 will treat it as zero with -ffinite-math-only. */
2128 if (rtx_equal_p (trueop0, trueop1)
2129 && ! side_effects_p (op0)
2130 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2131 return CONST0_RTX (mode);
2133 /* Change subtraction from zero into negation. (0 - x) is the
2134 same as -x when x is NaN, infinite, or finite and nonzero.
2135 But if the mode has signed zeros, and does not round towards
2136 -infinity, then 0 - 0 is 0, not -0. */
2137 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2138 return simplify_gen_unary (NEG, mode, op1, mode);
2140 /* (-1 - a) is ~a. */
2141 if (trueop0 == constm1_rtx)
2142 return simplify_gen_unary (NOT, mode, op1, mode);
2144 /* Subtracting 0 has no effect unless the mode has signed zeros
2145 and supports rounding towards -infinity. In such a case,
2146 0 - 0 is -0. */
2147 if (!(HONOR_SIGNED_ZEROS (mode)
2148 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2149 && trueop1 == CONST0_RTX (mode))
2150 return op0;
2152 /* See if this is something like X * C - X or vice versa or
2153 if the multiplication is written as a shift. If so, we can
2154 distribute and make a new multiply, shift, or maybe just
2155 have X (if C is 2 in the example above). But don't make
2156 something more expensive than we had before. */
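/* For instance, (minus (mult X (const_int 3)) X) becomes (mult X (const_int 2)),
   and (minus (ashift X (const_int 3)) X) becomes (mult X (const_int 7)),
   again subject to the cost comparison below. */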
2158 if (SCALAR_INT_MODE_P (mode))
2160 double_int coeff0, negcoeff1;
2161 rtx lhs = op0, rhs = op1;
2163 coeff0 = double_int_one;
2164 negcoeff1 = double_int_minus_one;
2166 if (GET_CODE (lhs) == NEG)
2168 coeff0 = double_int_minus_one;
2169 lhs = XEXP (lhs, 0);
2171 else if (GET_CODE (lhs) == MULT
2172 && CONST_INT_P (XEXP (lhs, 1)))
2174 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2175 lhs = XEXP (lhs, 0);
2177 else if (GET_CODE (lhs) == ASHIFT
2178 && CONST_INT_P (XEXP (lhs, 1))
2179 && INTVAL (XEXP (lhs, 1)) >= 0
2180 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2182 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2183 lhs = XEXP (lhs, 0);
2186 if (GET_CODE (rhs) == NEG)
2188 negcoeff1 = double_int_one;
2189 rhs = XEXP (rhs, 0);
2191 else if (GET_CODE (rhs) == MULT
2192 && CONST_INT_P (XEXP (rhs, 1)))
2194 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2195 rhs = XEXP (rhs, 0);
2197 else if (GET_CODE (rhs) == ASHIFT
2198 && CONST_INT_P (XEXP (rhs, 1))
2199 && INTVAL (XEXP (rhs, 1)) >= 0
2200 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2202 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2203 negcoeff1 = -negcoeff1;
2204 rhs = XEXP (rhs, 0);
2207 if (rtx_equal_p (lhs, rhs))
2209 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2210 rtx coeff;
2211 double_int val;
2212 bool speed = optimize_function_for_speed_p (cfun);
2214 val = coeff0 + negcoeff1;
2215 coeff = immed_double_int_const (val, mode);
2217 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2218 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2219 ? tem : 0;
2223 /* (a - (-b)) -> (a + b). True even for IEEE. */
2224 if (GET_CODE (op1) == NEG)
2225 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2227 /* (-x - c) may be simplified as (-c - x). */
2228 if (GET_CODE (op0) == NEG
2229 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2231 tem = simplify_unary_operation (NEG, mode, op1, mode);
2232 if (tem)
2233 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2236 /* Don't let a relocatable value get a negative coeff. */
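/* For instance, (minus (symbol_ref "foo") (const_int 4)) is rewritten as a
   PLUS of (symbol_ref "foo") and (const_int -4), which the PLUS case above
   can then fold further. */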
2237 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2238 return simplify_gen_binary (PLUS, mode,
2239 op0,
2240 neg_const_int (mode, op1));
2242 /* (x - (x & y)) -> (x & ~y) */
2243 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2245 if (rtx_equal_p (op0, XEXP (op1, 0)))
2247 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2248 GET_MODE (XEXP (op1, 1)));
2249 return simplify_gen_binary (AND, mode, op0, tem);
2251 if (rtx_equal_p (op0, XEXP (op1, 1)))
2253 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2254 GET_MODE (XEXP (op1, 0)));
2255 return simplify_gen_binary (AND, mode, op0, tem);
2259 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2260 by reversing the comparison code if valid. */
2261 if (STORE_FLAG_VALUE == 1
2262 && trueop0 == const1_rtx
2263 && COMPARISON_P (op1)
2264 && (reversed = reversed_comparison (op1, mode)))
2265 return reversed;
2267 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2268 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2269 && GET_CODE (op1) == MULT
2270 && GET_CODE (XEXP (op1, 0)) == NEG)
2272 rtx in1, in2;
2274 in1 = XEXP (XEXP (op1, 0), 0);
2275 in2 = XEXP (op1, 1);
2276 return simplify_gen_binary (PLUS, mode,
2277 simplify_gen_binary (MULT, mode,
2278 in1, in2),
2279 op0);
2282 /* Canonicalize (minus (neg A) (mult B C)) to
2283 (minus (mult (neg B) C) A). */
2284 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2285 && GET_CODE (op1) == MULT
2286 && GET_CODE (op0) == NEG)
2288 rtx in1, in2;
2290 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2291 in2 = XEXP (op1, 1);
2292 return simplify_gen_binary (MINUS, mode,
2293 simplify_gen_binary (MULT, mode,
2294 in1, in2),
2295 XEXP (op0, 0));
2298 /* If one of the operands is a PLUS or a MINUS, see if we can
2299 simplify this by the associative law. This will, for example,
2300 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2301 Don't use the associative law for floating point.
2302 The inaccuracy makes it nonassociative,
2303 and subtle programs can break if operations are associated. */
2305 if (INTEGRAL_MODE_P (mode)
2306 && (plus_minus_operand_p (op0)
2307 || plus_minus_operand_p (op1))
2308 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2309 return tem;
2310 break;
2312 case MULT:
2313 if (trueop1 == constm1_rtx)
2314 return simplify_gen_unary (NEG, mode, op0, mode);
2316 if (GET_CODE (op0) == NEG)
2318 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2319 /* If op1 is a MULT as well and simplify_unary_operation
2320 just moved the NEG to the second operand, simplify_gen_binary
2321 below could, through simplify_associative_operation, move
2322 the NEG around again and recurse endlessly. */
2323 if (temp
2324 && GET_CODE (op1) == MULT
2325 && GET_CODE (temp) == MULT
2326 && XEXP (op1, 0) == XEXP (temp, 0)
2327 && GET_CODE (XEXP (temp, 1)) == NEG
2328 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2329 temp = NULL_RTX;
2330 if (temp)
2331 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2333 if (GET_CODE (op1) == NEG)
2335 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2336 /* If op0 is a MULT as well and simplify_unary_operation
2337 just moved the NEG to the second operand, simplify_gen_binary
2338 below could, through simplify_associative_operation, move
2339 the NEG around again and recurse endlessly. */
2340 if (temp
2341 && GET_CODE (op0) == MULT
2342 && GET_CODE (temp) == MULT
2343 && XEXP (op0, 0) == XEXP (temp, 0)
2344 && GET_CODE (XEXP (temp, 1)) == NEG
2345 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2346 temp = NULL_RTX;
2347 if (temp)
2348 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2351 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2352 x is NaN, since x * 0 is then also NaN. Nor is it valid
2353 when the mode has signed zeros, since multiplying a negative
2354 number by 0 will give -0, not 0. */
2355 if (!HONOR_NANS (mode)
2356 && !HONOR_SIGNED_ZEROS (mode)
2357 && trueop1 == CONST0_RTX (mode)
2358 && ! side_effects_p (op0))
2359 return op1;
2361 /* In IEEE floating point, x*1 is not equivalent to x for
2362 signalling NaNs. */
2363 if (!HONOR_SNANS (mode)
2364 && trueop1 == CONST1_RTX (mode))
2365 return op0;
2367 /* Convert multiply by constant power of two into shift unless
2368 we are still generating RTL. This test is a kludge. */
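/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)). */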
2369 if (CONST_INT_P (trueop1)
2370 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2371 /* If the mode is larger than the host word size, and the
2372 uppermost bit is set, then this isn't a power of two due
2373 to implicit sign extension. */
2374 && (width <= HOST_BITS_PER_WIDE_INT
2375 || val != HOST_BITS_PER_WIDE_INT - 1))
2376 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2378 /* Likewise for multipliers wider than a word. */
2379 if (CONST_DOUBLE_AS_INT_P (trueop1)
2380 && GET_MODE (op0) == mode
2381 && CONST_DOUBLE_LOW (trueop1) == 0
2382 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2383 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2384 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2385 return simplify_gen_binary (ASHIFT, mode, op0,
2386 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2388 /* x*2 is x+x and x*(-1) is -x */
2389 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2390 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2391 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2392 && GET_MODE (op0) == mode)
2394 REAL_VALUE_TYPE d;
2395 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2397 if (REAL_VALUES_EQUAL (d, dconst2))
2398 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2400 if (!HONOR_SNANS (mode)
2401 && REAL_VALUES_EQUAL (d, dconstm1))
2402 return simplify_gen_unary (NEG, mode, op0, mode);
2405 /* Optimize -x * -x as x * x. */
2406 if (FLOAT_MODE_P (mode)
2407 && GET_CODE (op0) == NEG
2408 && GET_CODE (op1) == NEG
2409 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2410 && !side_effects_p (XEXP (op0, 0)))
2411 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2413 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2414 if (SCALAR_FLOAT_MODE_P (mode)
2415 && GET_CODE (op0) == ABS
2416 && GET_CODE (op1) == ABS
2417 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2418 && !side_effects_p (XEXP (op0, 0)))
2419 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2421 /* Reassociate multiplication, but for floating point MULTs
2422 only when the user specifies unsafe math optimizations. */
2423 if (! FLOAT_MODE_P (mode)
2424 || flag_unsafe_math_optimizations)
2426 tem = simplify_associative_operation (code, mode, op0, op1);
2427 if (tem)
2428 return tem;
2430 break;
2432 case IOR:
2433 if (trueop1 == CONST0_RTX (mode))
2434 return op0;
2435 if (INTEGRAL_MODE_P (mode)
2436 && trueop1 == CONSTM1_RTX (mode)
2437 && !side_effects_p (op0))
2438 return op1;
2439 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2440 return op0;
2441 /* A | (~A) -> -1 */
2442 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2443 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2444 && ! side_effects_p (op0)
2445 && SCALAR_INT_MODE_P (mode))
2446 return constm1_rtx;
2448 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2449 if (CONST_INT_P (op1)
2450 && HWI_COMPUTABLE_MODE_P (mode)
2451 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2452 && !side_effects_p (op0))
2453 return op1;
2455 /* Canonicalize (X & C1) | C2. */
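/* For instance, (ior (and X (const_int 0x03)) (const_int 0x0f)) becomes
   (const_int 0x0f) when X has no side effects, and
   (ior (and X (const_int 0x3c)) (const_int 0x0c)) becomes
   (ior (and X (const_int 0x30)) (const_int 0x0c)). */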
2456 if (GET_CODE (op0) == AND
2457 && CONST_INT_P (trueop1)
2458 && CONST_INT_P (XEXP (op0, 1)))
2460 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2461 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2462 HOST_WIDE_INT c2 = INTVAL (trueop1);
2464 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2465 if ((c1 & c2) == c1
2466 && !side_effects_p (XEXP (op0, 0)))
2467 return trueop1;
2469 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2470 if (((c1|c2) & mask) == mask)
2471 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2473 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2474 if (((c1 & ~c2) & mask) != (c1 & mask))
2476 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2477 gen_int_mode (c1 & ~c2, mode));
2478 return simplify_gen_binary (IOR, mode, tem, op1);
2482 /* Convert (A & B) | A to A. */
2483 if (GET_CODE (op0) == AND
2484 && (rtx_equal_p (XEXP (op0, 0), op1)
2485 || rtx_equal_p (XEXP (op0, 1), op1))
2486 && ! side_effects_p (XEXP (op0, 0))
2487 && ! side_effects_p (XEXP (op0, 1)))
2488 return op1;
2490 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2491 mode size to (rotate A CX). */
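/* For example, in SImode (ior (ashift A (const_int 24)) (lshiftrt A (const_int 8)))
   becomes (rotate A (const_int 24)). */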
2493 if (GET_CODE (op1) == ASHIFT
2494 || GET_CODE (op1) == SUBREG)
2496 opleft = op1;
2497 opright = op0;
2499 else
2501 opright = op1;
2502 opleft = op0;
2505 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2506 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2507 && CONST_INT_P (XEXP (opleft, 1))
2508 && CONST_INT_P (XEXP (opright, 1))
2509 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2510 == GET_MODE_PRECISION (mode)))
2511 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2513 /* Same, but for ashift that has been "simplified" to a wider mode
2514 by simplify_shift_const. */
2516 if (GET_CODE (opleft) == SUBREG
2517 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2518 && GET_CODE (opright) == LSHIFTRT
2519 && GET_CODE (XEXP (opright, 0)) == SUBREG
2520 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2521 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2522 && (GET_MODE_SIZE (GET_MODE (opleft))
2523 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2524 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2525 SUBREG_REG (XEXP (opright, 0)))
2526 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2527 && CONST_INT_P (XEXP (opright, 1))
2528 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2529 == GET_MODE_PRECISION (mode)))
2530 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2531 XEXP (SUBREG_REG (opleft), 1));
2533 /* If we have (ior (and X C1) C2), simplify this by making
2534 C1 as small as possible if C1 actually changes. */
2535 if (CONST_INT_P (op1)
2536 && (HWI_COMPUTABLE_MODE_P (mode)
2537 || INTVAL (op1) > 0)
2538 && GET_CODE (op0) == AND
2539 && CONST_INT_P (XEXP (op0, 1))
2540 && CONST_INT_P (op1)
2541 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2542 return simplify_gen_binary (IOR, mode,
2543 simplify_gen_binary
2544 (AND, mode, XEXP (op0, 0),
2545 GEN_INT (UINTVAL (XEXP (op0, 1))
2546 & ~UINTVAL (op1))),
2547 op1);
2549 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2550 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2551 the PLUS does not affect any of the bits in OP1: then we can do
2552 the IOR as a PLUS and we can associate. This is valid if OP1
2553 can be safely shifted left C bits. */
2554 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2555 && GET_CODE (XEXP (op0, 0)) == PLUS
2556 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2557 && CONST_INT_P (XEXP (op0, 1))
2558 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2560 int count = INTVAL (XEXP (op0, 1));
2561 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2563 if (mask >> count == INTVAL (trueop1)
2564 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2565 return simplify_gen_binary (ASHIFTRT, mode,
2566 plus_constant (mode, XEXP (op0, 0),
2567 mask),
2568 XEXP (op0, 1));
2571 tem = simplify_associative_operation (code, mode, op0, op1);
2572 if (tem)
2573 return tem;
2574 break;
2576 case XOR:
2577 if (trueop1 == CONST0_RTX (mode))
2578 return op0;
2579 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2580 return simplify_gen_unary (NOT, mode, op0, mode);
2581 if (rtx_equal_p (trueop0, trueop1)
2582 && ! side_effects_p (op0)
2583 && GET_MODE_CLASS (mode) != MODE_CC)
2584 return CONST0_RTX (mode);
2586 /* Canonicalize XOR of the most significant bit to PLUS. */
2587 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2588 && mode_signbit_p (mode, op1))
2589 return simplify_gen_binary (PLUS, mode, op0, op1);
2590 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2591 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2592 && GET_CODE (op0) == PLUS
2593 && (CONST_INT_P (XEXP (op0, 1))
2594 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2595 && mode_signbit_p (mode, XEXP (op0, 1)))
2596 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2597 simplify_gen_binary (XOR, mode, op1,
2598 XEXP (op0, 1)));
2600 /* If we are XORing two things that have no bits in common,
2601 convert them into an IOR. This helps to detect rotation encoded
2602 using those methods and possibly other simplifications. */
2604 if (HWI_COMPUTABLE_MODE_P (mode)
2605 && (nonzero_bits (op0, mode)
2606 & nonzero_bits (op1, mode)) == 0)
2607 return (simplify_gen_binary (IOR, mode, op0, op1));
2609 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2610 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2611 (NOT y). */
2613 int num_negated = 0;
2615 if (GET_CODE (op0) == NOT)
2616 num_negated++, op0 = XEXP (op0, 0);
2617 if (GET_CODE (op1) == NOT)
2618 num_negated++, op1 = XEXP (op1, 0);
2620 if (num_negated == 2)
2621 return simplify_gen_binary (XOR, mode, op0, op1);
2622 else if (num_negated == 1)
2623 return simplify_gen_unary (NOT, mode,
2624 simplify_gen_binary (XOR, mode, op0, op1),
2625 mode);
2628 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2629 correspond to a machine insn or result in further simplifications
2630 if B is a constant. */
2632 if (GET_CODE (op0) == AND
2633 && rtx_equal_p (XEXP (op0, 1), op1)
2634 && ! side_effects_p (op1))
2635 return simplify_gen_binary (AND, mode,
2636 simplify_gen_unary (NOT, mode,
2637 XEXP (op0, 0), mode),
2638 op1);
2640 else if (GET_CODE (op0) == AND
2641 && rtx_equal_p (XEXP (op0, 0), op1)
2642 && ! side_effects_p (op1))
2643 return simplify_gen_binary (AND, mode,
2644 simplify_gen_unary (NOT, mode,
2645 XEXP (op0, 1), mode),
2646 op1);
2648 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2649 we can transform like this:
2650 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2651 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2652 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2653 Attempt a few simplifications when B and C are both constants. */
2654 if (GET_CODE (op0) == AND
2655 && CONST_INT_P (op1)
2656 && CONST_INT_P (XEXP (op0, 1)))
2658 rtx a = XEXP (op0, 0);
2659 rtx b = XEXP (op0, 1);
2660 rtx c = op1;
2661 HOST_WIDE_INT bval = INTVAL (b);
2662 HOST_WIDE_INT cval = INTVAL (c);
2664 rtx na_c
2665 = simplify_binary_operation (AND, mode,
2666 simplify_gen_unary (NOT, mode, a, mode),
2667 op1);
2668 if ((~cval & bval) == 0)
2670 /* Try to simplify ~A&C | ~B&C. */
2671 if (na_c != NULL_RTX)
2672 return simplify_gen_binary (IOR, mode, na_c,
2673 GEN_INT (~bval & cval));
2675 else
2677 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2678 if (na_c == const0_rtx)
2680 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2681 GEN_INT (~cval & bval));
2682 return simplify_gen_binary (IOR, mode, a_nc_b,
2683 GEN_INT (~bval & cval));
2688 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2689 comparison if STORE_FLAG_VALUE is 1. */
2690 if (STORE_FLAG_VALUE == 1
2691 && trueop1 == const1_rtx
2692 && COMPARISON_P (op0)
2693 && (reversed = reversed_comparison (op0, mode)))
2694 return reversed;
2696 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2697 is (lt foo (const_int 0)), so we can perform the above
2698 simplification if STORE_FLAG_VALUE is 1. */
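/* For example, in SImode (xor (lshiftrt X (const_int 31)) (const_int 1))
   becomes (ge X (const_int 0)) when STORE_FLAG_VALUE is 1. */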
2700 if (STORE_FLAG_VALUE == 1
2701 && trueop1 == const1_rtx
2702 && GET_CODE (op0) == LSHIFTRT
2703 && CONST_INT_P (XEXP (op0, 1))
2704 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2705 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2707 /* (xor (comparison foo bar) (const_int sign-bit))
2708 when STORE_FLAG_VALUE is the sign bit. */
2709 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2710 && trueop1 == const_true_rtx
2711 && COMPARISON_P (op0)
2712 && (reversed = reversed_comparison (op0, mode)))
2713 return reversed;
2715 tem = simplify_associative_operation (code, mode, op0, op1);
2716 if (tem)
2717 return tem;
2718 break;
2720 case AND:
2721 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2722 return trueop1;
2723 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2724 return op0;
2725 if (HWI_COMPUTABLE_MODE_P (mode))
2727 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2728 HOST_WIDE_INT nzop1;
2729 if (CONST_INT_P (trueop1))
2731 HOST_WIDE_INT val1 = INTVAL (trueop1);
2732 /* If we are turning off bits already known off in OP0, we need
2733 not do an AND. */
2734 if ((nzop0 & ~val1) == 0)
2735 return op0;
2737 nzop1 = nonzero_bits (trueop1, mode);
2738 /* If we are clearing all the nonzero bits, the result is zero. */
2739 if ((nzop1 & nzop0) == 0
2740 && !side_effects_p (op0) && !side_effects_p (op1))
2741 return CONST0_RTX (mode);
2743 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2744 && GET_MODE_CLASS (mode) != MODE_CC)
2745 return op0;
2746 /* A & (~A) -> 0 */
2747 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2748 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2749 && ! side_effects_p (op0)
2750 && GET_MODE_CLASS (mode) != MODE_CC)
2751 return CONST0_RTX (mode);
2753 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2754 there are no nonzero bits of C outside of X's mode. */
2755 if ((GET_CODE (op0) == SIGN_EXTEND
2756 || GET_CODE (op0) == ZERO_EXTEND)
2757 && CONST_INT_P (trueop1)
2758 && HWI_COMPUTABLE_MODE_P (mode)
2759 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2760 & UINTVAL (trueop1)) == 0)
2762 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2763 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2764 gen_int_mode (INTVAL (trueop1),
2765 imode));
2766 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2769 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2770 we might be able to further simplify the AND with X and potentially
2771 remove the truncation altogether. */
2772 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2774 rtx x = XEXP (op0, 0);
2775 enum machine_mode xmode = GET_MODE (x);
2776 tem = simplify_gen_binary (AND, xmode, x,
2777 gen_int_mode (INTVAL (trueop1), xmode));
2778 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2781 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
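/* For instance, (and (ior X (const_int 0x0c)) (const_int 0x0a)) becomes
   (ior (and X (const_int 0x0a)) (const_int 0x08)). */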
2782 if (GET_CODE (op0) == IOR
2783 && CONST_INT_P (trueop1)
2784 && CONST_INT_P (XEXP (op0, 1)))
2786 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2787 return simplify_gen_binary (IOR, mode,
2788 simplify_gen_binary (AND, mode,
2789 XEXP (op0, 0), op1),
2790 gen_int_mode (tmp, mode));
2793 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2794 insn (and may simplify more). */
2795 if (GET_CODE (op0) == XOR
2796 && rtx_equal_p (XEXP (op0, 0), op1)
2797 && ! side_effects_p (op1))
2798 return simplify_gen_binary (AND, mode,
2799 simplify_gen_unary (NOT, mode,
2800 XEXP (op0, 1), mode),
2801 op1);
2803 if (GET_CODE (op0) == XOR
2804 && rtx_equal_p (XEXP (op0, 1), op1)
2805 && ! side_effects_p (op1))
2806 return simplify_gen_binary (AND, mode,
2807 simplify_gen_unary (NOT, mode,
2808 XEXP (op0, 0), mode),
2809 op1);
2811 /* Similarly for (~(A ^ B)) & A. */
2812 if (GET_CODE (op0) == NOT
2813 && GET_CODE (XEXP (op0, 0)) == XOR
2814 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2815 && ! side_effects_p (op1))
2816 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2818 if (GET_CODE (op0) == NOT
2819 && GET_CODE (XEXP (op0, 0)) == XOR
2820 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2821 && ! side_effects_p (op1))
2822 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2824 /* Convert (A | B) & A to A. */
2825 if (GET_CODE (op0) == IOR
2826 && (rtx_equal_p (XEXP (op0, 0), op1)
2827 || rtx_equal_p (XEXP (op0, 1), op1))
2828 && ! side_effects_p (XEXP (op0, 0))
2829 && ! side_effects_p (XEXP (op0, 1)))
2830 return op1;
2832 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2833 ((A & N) + B) & M -> (A + B) & M
2834 Similarly if (N & M) == 0,
2835 ((A | N) + B) & M -> (A + B) & M
2836 and for - instead of + and/or ^ instead of |.
2837 Also, if (N & M) == 0, then
2838 (A +- N) & M -> A & M. */
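/* For example, with M == 0xff and N == 0xffff,
   (and (plus (and A (const_int 0xffff)) B) (const_int 0xff)) becomes
   (and (plus A B) (const_int 0xff)). */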
2839 if (CONST_INT_P (trueop1)
2840 && HWI_COMPUTABLE_MODE_P (mode)
2841 && ~UINTVAL (trueop1)
2842 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2843 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2845 rtx pmop[2];
2846 int which;
2848 pmop[0] = XEXP (op0, 0);
2849 pmop[1] = XEXP (op0, 1);
2851 if (CONST_INT_P (pmop[1])
2852 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2853 return simplify_gen_binary (AND, mode, pmop[0], op1);
2855 for (which = 0; which < 2; which++)
2857 tem = pmop[which];
2858 switch (GET_CODE (tem))
2860 case AND:
2861 if (CONST_INT_P (XEXP (tem, 1))
2862 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2863 == UINTVAL (trueop1))
2864 pmop[which] = XEXP (tem, 0);
2865 break;
2866 case IOR:
2867 case XOR:
2868 if (CONST_INT_P (XEXP (tem, 1))
2869 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2870 pmop[which] = XEXP (tem, 0);
2871 break;
2872 default:
2873 break;
2877 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2879 tem = simplify_gen_binary (GET_CODE (op0), mode,
2880 pmop[0], pmop[1]);
2881 return simplify_gen_binary (code, mode, tem, op1);
2885 /* (and X (ior (not X) Y)) -> (and X Y) */
2886 if (GET_CODE (op1) == IOR
2887 && GET_CODE (XEXP (op1, 0)) == NOT
2888 && op0 == XEXP (XEXP (op1, 0), 0))
2889 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2891 /* (and (ior (not X) Y) X) -> (and X Y) */
2892 if (GET_CODE (op0) == IOR
2893 && GET_CODE (XEXP (op0, 0)) == NOT
2894 && op1 == XEXP (XEXP (op0, 0), 0))
2895 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2897 tem = simplify_associative_operation (code, mode, op0, op1);
2898 if (tem)
2899 return tem;
2900 break;
2902 case UDIV:
2903 /* 0/x is 0 (or x&0 if x has side-effects). */
2904 if (trueop0 == CONST0_RTX (mode))
2906 if (side_effects_p (op1))
2907 return simplify_gen_binary (AND, mode, op1, trueop0);
2908 return trueop0;
2910 /* x/1 is x. */
2911 if (trueop1 == CONST1_RTX (mode))
2912 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2913 /* Convert divide by power of two into shift. */
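/* For example, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)). */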
2914 if (CONST_INT_P (trueop1)
2915 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2916 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2917 break;
2919 case DIV:
2920 /* Handle floating point and integers separately. */
2921 if (SCALAR_FLOAT_MODE_P (mode))
2923 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2924 safe for modes with NaNs, since 0.0 / 0.0 will then be
2925 NaN rather than 0.0. Nor is it safe for modes with signed
2926 zeros, since dividing 0 by a negative number gives -0.0 */
2927 if (trueop0 == CONST0_RTX (mode)
2928 && !HONOR_NANS (mode)
2929 && !HONOR_SIGNED_ZEROS (mode)
2930 && ! side_effects_p (op1))
2931 return op0;
2932 /* x/1.0 is x. */
2933 if (trueop1 == CONST1_RTX (mode)
2934 && !HONOR_SNANS (mode))
2935 return op0;
2937 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2938 && trueop1 != CONST0_RTX (mode))
2940 REAL_VALUE_TYPE d;
2941 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2943 /* x/-1.0 is -x. */
2944 if (REAL_VALUES_EQUAL (d, dconstm1)
2945 && !HONOR_SNANS (mode))
2946 return simplify_gen_unary (NEG, mode, op0, mode);
2948 /* Change FP division by a constant into multiplication.
2949 Only do this with -freciprocal-math. */
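/* For instance, with -freciprocal-math (div X (const_double 4.0)) becomes
   (mult X (const_double 0.25)). */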
2950 if (flag_reciprocal_math
2951 && !REAL_VALUES_EQUAL (d, dconst0))
2953 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2954 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2955 return simplify_gen_binary (MULT, mode, op0, tem);
2959 else if (SCALAR_INT_MODE_P (mode))
2961 /* 0/x is 0 (or x&0 if x has side-effects). */
2962 if (trueop0 == CONST0_RTX (mode)
2963 && !cfun->can_throw_non_call_exceptions)
2965 if (side_effects_p (op1))
2966 return simplify_gen_binary (AND, mode, op1, trueop0);
2967 return trueop0;
2969 /* x/1 is x. */
2970 if (trueop1 == CONST1_RTX (mode))
2971 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2972 /* x/-1 is -x. */
2973 if (trueop1 == constm1_rtx)
2975 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2976 return simplify_gen_unary (NEG, mode, x, mode);
2979 break;
2981 case UMOD:
2982 /* 0%x is 0 (or x&0 if x has side-effects). */
2983 if (trueop0 == CONST0_RTX (mode))
2985 if (side_effects_p (op1))
2986 return simplify_gen_binary (AND, mode, op1, trueop0);
2987 return trueop0;
2989 /* x%1 is 0 (or x&0 if x has side-effects). */
2990 if (trueop1 == CONST1_RTX (mode))
2992 if (side_effects_p (op0))
2993 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2994 return CONST0_RTX (mode);
2996 /* Implement modulus by power of two as AND. */
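/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)). */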
2997 if (CONST_INT_P (trueop1)
2998 && exact_log2 (UINTVAL (trueop1)) > 0)
2999 return simplify_gen_binary (AND, mode, op0,
3000 GEN_INT (INTVAL (op1) - 1));
3001 break;
3003 case MOD:
3004 /* 0%x is 0 (or x&0 if x has side-effects). */
3005 if (trueop0 == CONST0_RTX (mode))
3007 if (side_effects_p (op1))
3008 return simplify_gen_binary (AND, mode, op1, trueop0);
3009 return trueop0;
3011 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3012 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3014 if (side_effects_p (op0))
3015 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3016 return CONST0_RTX (mode);
3018 break;
3020 case ROTATERT:
3021 case ROTATE:
3022 case ASHIFTRT:
3023 if (trueop1 == CONST0_RTX (mode))
3024 return op0;
3025 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3026 return op0;
3027 /* Rotating ~0 always results in ~0. */
3028 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3029 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3030 && ! side_effects_p (op1))
3031 return op0;
3032 canonicalize_shift:
3033 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3035 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3036 if (val != INTVAL (op1))
3037 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3039 break;
3041 case ASHIFT:
3042 case SS_ASHIFT:
3043 case US_ASHIFT:
3044 if (trueop1 == CONST0_RTX (mode))
3045 return op0;
3046 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3047 return op0;
3048 goto canonicalize_shift;
3050 case LSHIFTRT:
3051 if (trueop1 == CONST0_RTX (mode))
3052 return op0;
3053 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3054 return op0;
3055 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
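/* For instance, on a target where CLZ of zero is defined to be 32 for an
   SImode operand, (lshiftrt (clz X) (const_int 5)) becomes
   (eq X (const_int 0)). */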
3056 if (GET_CODE (op0) == CLZ
3057 && CONST_INT_P (trueop1)
3058 && STORE_FLAG_VALUE == 1
3059 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3061 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3062 unsigned HOST_WIDE_INT zero_val = 0;
3064 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3065 && zero_val == GET_MODE_PRECISION (imode)
3066 && INTVAL (trueop1) == exact_log2 (zero_val))
3067 return simplify_gen_relational (EQ, mode, imode,
3068 XEXP (op0, 0), const0_rtx);
3070 goto canonicalize_shift;
3072 case SMIN:
3073 if (width <= HOST_BITS_PER_WIDE_INT
3074 && mode_signbit_p (mode, trueop1)
3075 && ! side_effects_p (op0))
3076 return op1;
3077 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3078 return op0;
3079 tem = simplify_associative_operation (code, mode, op0, op1);
3080 if (tem)
3081 return tem;
3082 break;
3084 case SMAX:
3085 if (width <= HOST_BITS_PER_WIDE_INT
3086 && CONST_INT_P (trueop1)
3087 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3088 && ! side_effects_p (op0))
3089 return op1;
3090 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3091 return op0;
3092 tem = simplify_associative_operation (code, mode, op0, op1);
3093 if (tem)
3094 return tem;
3095 break;
3097 case UMIN:
3098 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3099 return op1;
3100 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3101 return op0;
3102 tem = simplify_associative_operation (code, mode, op0, op1);
3103 if (tem)
3104 return tem;
3105 break;
3107 case UMAX:
3108 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3109 return op1;
3110 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3111 return op0;
3112 tem = simplify_associative_operation (code, mode, op0, op1);
3113 if (tem)
3114 return tem;
3115 break;
3117 case SS_PLUS:
3118 case US_PLUS:
3119 case SS_MINUS:
3120 case US_MINUS:
3121 case SS_MULT:
3122 case US_MULT:
3123 case SS_DIV:
3124 case US_DIV:
3125 /* ??? There are simplifications that can be done. */
3126 return 0;
3128 case VEC_SELECT:
3129 if (!VECTOR_MODE_P (mode))
3131 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3132 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3133 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3134 gcc_assert (XVECLEN (trueop1, 0) == 1);
3135 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3137 if (GET_CODE (trueop0) == CONST_VECTOR)
3138 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3139 (trueop1, 0, 0)));
3141 /* Extract a scalar element from a nested VEC_SELECT expression
3142 (with optional nested VEC_CONCAT expression). Some targets
3143 (i386) extract a scalar element from a vector using a chain of
3144 nested VEC_SELECT expressions. When the input operand is a
3145 memory operand, this operation can be simplified to a simple
3146 scalar load from an offset memory address. */
3147 if (GET_CODE (trueop0) == VEC_SELECT)
3149 rtx op0 = XEXP (trueop0, 0);
3150 rtx op1 = XEXP (trueop0, 1);
3152 enum machine_mode opmode = GET_MODE (op0);
3153 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3154 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3156 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3157 int elem;
3159 rtvec vec;
3160 rtx tmp_op, tmp;
3162 gcc_assert (GET_CODE (op1) == PARALLEL);
3163 gcc_assert (i < n_elts);
3165 /* Select the element pointed to by the nested selector. */
3166 elem = INTVAL (XVECEXP (op1, 0, i));
3168 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3169 if (GET_CODE (op0) == VEC_CONCAT)
3171 rtx op00 = XEXP (op0, 0);
3172 rtx op01 = XEXP (op0, 1);
3174 enum machine_mode mode00, mode01;
3175 int n_elts00, n_elts01;
3177 mode00 = GET_MODE (op00);
3178 mode01 = GET_MODE (op01);
3180 /* Find out number of elements of each operand. */
3181 if (VECTOR_MODE_P (mode00))
3183 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3184 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3186 else
3187 n_elts00 = 1;
3189 if (VECTOR_MODE_P (mode01))
3191 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3192 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3194 else
3195 n_elts01 = 1;
3197 gcc_assert (n_elts == n_elts00 + n_elts01);
3199 /* Select correct operand of VEC_CONCAT
3200 and adjust selector. */
3201 if (elem < n_elts00)
3202 tmp_op = op00;
3203 else
3205 tmp_op = op01;
3206 elem -= n_elts00;
3209 else
3210 tmp_op = op0;
3212 vec = rtvec_alloc (1);
3213 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3215 tmp = gen_rtx_fmt_ee (code, mode,
3216 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3217 return tmp;
3219 if (GET_CODE (trueop0) == VEC_DUPLICATE
3220 && GET_MODE (XEXP (trueop0, 0)) == mode)
3221 return XEXP (trueop0, 0);
3223 else
3225 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3226 gcc_assert (GET_MODE_INNER (mode)
3227 == GET_MODE_INNER (GET_MODE (trueop0)));
3228 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3230 if (GET_CODE (trueop0) == CONST_VECTOR)
3232 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3233 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3234 rtvec v = rtvec_alloc (n_elts);
3235 unsigned int i;
3237 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3238 for (i = 0; i < n_elts; i++)
3240 rtx x = XVECEXP (trueop1, 0, i);
3242 gcc_assert (CONST_INT_P (x));
3243 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3244 INTVAL (x));
3247 return gen_rtx_CONST_VECTOR (mode, v);
3250 /* Recognize the identity. */
3251 if (GET_MODE (trueop0) == mode)
3253 bool maybe_ident = true;
3254 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3256 rtx j = XVECEXP (trueop1, 0, i);
3257 if (!CONST_INT_P (j) || INTVAL (j) != i)
3259 maybe_ident = false;
3260 break;
3263 if (maybe_ident)
3264 return trueop0;
3267 /* If we build {a,b} then permute it, build the result directly. */
3268 if (XVECLEN (trueop1, 0) == 2
3269 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3270 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3271 && GET_CODE (trueop0) == VEC_CONCAT
3272 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3273 && GET_MODE (XEXP (trueop0, 0)) == mode
3274 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3275 && GET_MODE (XEXP (trueop0, 1)) == mode)
3277 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3278 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3279 rtx subop0, subop1;
3281 gcc_assert (i0 < 4 && i1 < 4);
3282 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3283 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3285 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3288 if (XVECLEN (trueop1, 0) == 2
3289 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3290 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3291 && GET_CODE (trueop0) == VEC_CONCAT
3292 && GET_MODE (trueop0) == mode)
3294 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3295 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3296 rtx subop0, subop1;
3298 gcc_assert (i0 < 2 && i1 < 2);
3299 subop0 = XEXP (trueop0, i0);
3300 subop1 = XEXP (trueop0, i1);
3302 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3306 if (XVECLEN (trueop1, 0) == 1
3307 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3308 && GET_CODE (trueop0) == VEC_CONCAT)
3310 rtx vec = trueop0;
3311 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3313 /* Try to find the element in the VEC_CONCAT. */
3314 while (GET_MODE (vec) != mode
3315 && GET_CODE (vec) == VEC_CONCAT)
3317 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3318 if (offset < vec_size)
3319 vec = XEXP (vec, 0);
3320 else
3322 offset -= vec_size;
3323 vec = XEXP (vec, 1);
3325 vec = avoid_constant_pool_reference (vec);
3328 if (GET_MODE (vec) == mode)
3329 return vec;
3332 return 0;
3333 case VEC_CONCAT:
3335 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3336 ? GET_MODE (trueop0)
3337 : GET_MODE_INNER (mode));
3338 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3339 ? GET_MODE (trueop1)
3340 : GET_MODE_INNER (mode));
3342 gcc_assert (VECTOR_MODE_P (mode));
3343 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3344 == GET_MODE_SIZE (mode));
3346 if (VECTOR_MODE_P (op0_mode))
3347 gcc_assert (GET_MODE_INNER (mode)
3348 == GET_MODE_INNER (op0_mode));
3349 else
3350 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3352 if (VECTOR_MODE_P (op1_mode))
3353 gcc_assert (GET_MODE_INNER (mode)
3354 == GET_MODE_INNER (op1_mode));
3355 else
3356 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3358 if ((GET_CODE (trueop0) == CONST_VECTOR
3359 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3360 && (GET_CODE (trueop1) == CONST_VECTOR
3361 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3363 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3364 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3365 rtvec v = rtvec_alloc (n_elts);
3366 unsigned int i;
3367 unsigned in_n_elts = 1;
3369 if (VECTOR_MODE_P (op0_mode))
3370 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3371 for (i = 0; i < n_elts; i++)
3373 if (i < in_n_elts)
3375 if (!VECTOR_MODE_P (op0_mode))
3376 RTVEC_ELT (v, i) = trueop0;
3377 else
3378 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3380 else
3382 if (!VECTOR_MODE_P (op1_mode))
3383 RTVEC_ELT (v, i) = trueop1;
3384 else
3385 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3386 i - in_n_elts);
3390 return gen_rtx_CONST_VECTOR (mode, v);
3393 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3394 if (GET_CODE (trueop0) == VEC_SELECT
3395 && GET_CODE (trueop1) == VEC_SELECT
3396 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3398 rtx par0 = XEXP (trueop0, 1);
3399 rtx par1 = XEXP (trueop1, 1);
3400 int len0 = XVECLEN (par0, 0);
3401 int len1 = XVECLEN (par1, 0);
3402 rtvec vec = rtvec_alloc (len0 + len1);
3403 for (int i = 0; i < len0; i++)
3404 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3405 for (int i = 0; i < len1; i++)
3406 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3407 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3408 gen_rtx_PARALLEL (VOIDmode, vec));
3411 return 0;
3413 default:
3414 gcc_unreachable ();
3417 return 0;
3421 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3422 rtx op0, rtx op1)
3424 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3425 HOST_WIDE_INT val;
3426 unsigned int width = GET_MODE_PRECISION (mode);
3428 if (VECTOR_MODE_P (mode)
3429 && code != VEC_CONCAT
3430 && GET_CODE (op0) == CONST_VECTOR
3431 && GET_CODE (op1) == CONST_VECTOR)
3433 unsigned n_elts = GET_MODE_NUNITS (mode);
3434 enum machine_mode op0mode = GET_MODE (op0);
3435 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3436 enum machine_mode op1mode = GET_MODE (op1);
3437 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3438 rtvec v = rtvec_alloc (n_elts);
3439 unsigned int i;
3441 gcc_assert (op0_n_elts == n_elts);
3442 gcc_assert (op1_n_elts == n_elts);
3443 for (i = 0; i < n_elts; i++)
3445 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3446 CONST_VECTOR_ELT (op0, i),
3447 CONST_VECTOR_ELT (op1, i));
3448 if (!x)
3449 return 0;
3450 RTVEC_ELT (v, i) = x;
3453 return gen_rtx_CONST_VECTOR (mode, v);
3456 if (VECTOR_MODE_P (mode)
3457 && code == VEC_CONCAT
3458 && (CONST_INT_P (op0)
3459 || GET_CODE (op0) == CONST_FIXED
3460 || CONST_DOUBLE_P (op0))
3461 && (CONST_INT_P (op1)
3462 || CONST_DOUBLE_P (op1)
3463 || GET_CODE (op1) == CONST_FIXED))
3465 unsigned n_elts = GET_MODE_NUNITS (mode);
3466 rtvec v = rtvec_alloc (n_elts);
3468 gcc_assert (n_elts >= 2);
3469 if (n_elts == 2)
3471 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3472 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3474 RTVEC_ELT (v, 0) = op0;
3475 RTVEC_ELT (v, 1) = op1;
3477 else
3479 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3480 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3481 unsigned i;
3483 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3484 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3485 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3487 for (i = 0; i < op0_n_elts; ++i)
3488 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3489 for (i = 0; i < op1_n_elts; ++i)
3490 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3493 return gen_rtx_CONST_VECTOR (mode, v);
3496 if (SCALAR_FLOAT_MODE_P (mode)
3497 && CONST_DOUBLE_AS_FLOAT_P (op0)
3498 && CONST_DOUBLE_AS_FLOAT_P (op1)
3499 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3501 if (code == AND
3502 || code == IOR
3503 || code == XOR)
3505 long tmp0[4];
3506 long tmp1[4];
3507 REAL_VALUE_TYPE r;
3508 int i;
3510 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3511 GET_MODE (op0));
3512 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3513 GET_MODE (op1));
3514 for (i = 0; i < 4; i++)
3516 switch (code)
3518 case AND:
3519 tmp0[i] &= tmp1[i];
3520 break;
3521 case IOR:
3522 tmp0[i] |= tmp1[i];
3523 break;
3524 case XOR:
3525 tmp0[i] ^= tmp1[i];
3526 break;
3527 default:
3528 gcc_unreachable ();
3531 real_from_target (&r, tmp0, mode);
3532 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3534 else
3536 REAL_VALUE_TYPE f0, f1, value, result;
3537 bool inexact;
3539 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3540 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3541 real_convert (&f0, mode, &f0);
3542 real_convert (&f1, mode, &f1);
3544 if (HONOR_SNANS (mode)
3545 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3546 return 0;
3548 if (code == DIV
3549 && REAL_VALUES_EQUAL (f1, dconst0)
3550 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3551 return 0;
3553 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3554 && flag_trapping_math
3555 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3557 int s0 = REAL_VALUE_NEGATIVE (f0);
3558 int s1 = REAL_VALUE_NEGATIVE (f1);
3560 switch (code)
3562 case PLUS:
3563 /* Inf + -Inf = NaN plus exception. */
3564 if (s0 != s1)
3565 return 0;
3566 break;
3567 case MINUS:
3568 /* Inf - Inf = NaN plus exception. */
3569 if (s0 == s1)
3570 return 0;
3571 break;
3572 case DIV:
3573 /* Inf / Inf = NaN plus exception. */
3574 return 0;
3575 default:
3576 break;
3580 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3581 && flag_trapping_math
3582 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3583 || (REAL_VALUE_ISINF (f1)
3584 && REAL_VALUES_EQUAL (f0, dconst0))))
3585 /* Inf * 0 = NaN plus exception. */
3586 return 0;
3588 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3589 &f0, &f1);
3590 real_convert (&result, mode, &value);
3592 /* Don't constant fold this floating point operation if
3593 the result has overflowed and flag_trapping_math. */
3595 if (flag_trapping_math
3596 && MODE_HAS_INFINITIES (mode)
3597 && REAL_VALUE_ISINF (result)
3598 && !REAL_VALUE_ISINF (f0)
3599 && !REAL_VALUE_ISINF (f1))
3600 /* Overflow plus exception. */
3601 return 0;
3603 /* Don't constant fold this floating point operation if the
3604 result may be dependent upon the run-time rounding mode and
3605 flag_rounding_math is set, or if GCC's software emulation
3606 is unable to accurately represent the result. */
3608 if ((flag_rounding_math
3609 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3610 && (inexact || !real_identical (&result, &value)))
3611 return NULL_RTX;
3613 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3617 /* We can fold some multi-word operations. */
3618 if (GET_MODE_CLASS (mode) == MODE_INT
3619 && width == HOST_BITS_PER_DOUBLE_INT
3620 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3621 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3623 double_int o0, o1, res, tmp;
3624 bool overflow;
3626 o0 = rtx_to_double_int (op0);
3627 o1 = rtx_to_double_int (op1);
3629 switch (code)
3631 case MINUS:
3632 /* A - B == A + (-B). */
3633 o1 = -o1;
3635 /* Fall through.... */
3637 case PLUS:
3638 res = o0 + o1;
3639 break;
3641 case MULT:
3642 res = o0 * o1;
3643 break;
3645 case DIV:
3646 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3647 &tmp, &overflow);
3648 if (overflow)
3649 return 0;
3650 break;
3652 case MOD:
3653 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3654 &res, &overflow);
3655 if (overflow)
3656 return 0;
3657 break;
3659 case UDIV:
3660 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3661 &tmp, &overflow);
3662 if (overflow)
3663 return 0;
3664 break;
3666 case UMOD:
3667 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3668 &res, &overflow);
3669 if (overflow)
3670 return 0;
3671 break;
3673 case AND:
3674 res = o0 & o1;
3675 break;
3677 case IOR:
3678 res = o0 | o1;
3679 break;
3681 case XOR:
3682 res = o0 ^ o1;
3683 break;
3685 case SMIN:
3686 res = o0.smin (o1);
3687 break;
3689 case SMAX:
3690 res = o0.smax (o1);
3691 break;
3693 case UMIN:
3694 res = o0.umin (o1);
3695 break;
3697 case UMAX:
3698 res = o0.umax (o1);
3699 break;
3701 case LSHIFTRT: case ASHIFTRT:
3702 case ASHIFT:
3703 case ROTATE: case ROTATERT:
3705 unsigned HOST_WIDE_INT cnt;
3707 if (SHIFT_COUNT_TRUNCATED)
3709 o1.high = 0;
3710 o1.low &= GET_MODE_PRECISION (mode) - 1;
3713 if (!o1.fits_uhwi ()
3714 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3715 return 0;
3717 cnt = o1.to_uhwi ();
3718 unsigned short prec = GET_MODE_PRECISION (mode);
3720 if (code == LSHIFTRT || code == ASHIFTRT)
3721 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3722 else if (code == ASHIFT)
3723 res = o0.alshift (cnt, prec);
3724 else if (code == ROTATE)
3725 res = o0.lrotate (cnt, prec);
3726 else /* code == ROTATERT */
3727 res = o0.rrotate (cnt, prec);
3729 break;
3731 default:
3732 return 0;
3735 return immed_double_int_const (res, mode);
3738 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3739 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3741 /* Get the integer argument values in two forms:
3742 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3744 arg0 = INTVAL (op0);
3745 arg1 = INTVAL (op1);
3747 if (width < HOST_BITS_PER_WIDE_INT)
3749 arg0 &= GET_MODE_MASK (mode);
3750 arg1 &= GET_MODE_MASK (mode);
3752 arg0s = arg0;
3753 if (val_signbit_known_set_p (mode, arg0s))
3754 arg0s |= ~GET_MODE_MASK (mode);
3756 arg1s = arg1;
3757 if (val_signbit_known_set_p (mode, arg1s))
3758 arg1s |= ~GET_MODE_MASK (mode);
3760 else
3762 arg0s = arg0;
3763 arg1s = arg1;
3766 /* Compute the value of the arithmetic. */
3768 switch (code)
3770 case PLUS:
3771 val = arg0s + arg1s;
3772 break;
3774 case MINUS:
3775 val = arg0s - arg1s;
3776 break;
3778 case MULT:
3779 val = arg0s * arg1s;
3780 break;
3782 case DIV:
3783 if (arg1s == 0
3784 || ((unsigned HOST_WIDE_INT) arg0s
3785 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3786 && arg1s == -1))
3787 return 0;
3788 val = arg0s / arg1s;
3789 break;
3791 case MOD:
3792 if (arg1s == 0
3793 || ((unsigned HOST_WIDE_INT) arg0s
3794 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3795 && arg1s == -1))
3796 return 0;
3797 val = arg0s % arg1s;
3798 break;
3800 case UDIV:
3801 if (arg1 == 0
3802 || ((unsigned HOST_WIDE_INT) arg0s
3803 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3804 && arg1s == -1))
3805 return 0;
3806 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3807 break;
3809 case UMOD:
3810 if (arg1 == 0
3811 || ((unsigned HOST_WIDE_INT) arg0s
3812 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3813 && arg1s == -1))
3814 return 0;
3815 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3816 break;
3818 case AND:
3819 val = arg0 & arg1;
3820 break;
3822 case IOR:
3823 val = arg0 | arg1;
3824 break;
3826 case XOR:
3827 val = arg0 ^ arg1;
3828 break;
3830 case LSHIFTRT:
3831 case ASHIFT:
3832 case ASHIFTRT:
3833 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3834 the value is in range. We can't return any old value for
3835 out-of-range arguments because either the middle-end (via
3836 shift_truncation_mask) or the back-end might be relying on
3837 target-specific knowledge. Nor can we rely on
3838 shift_truncation_mask, since the shift might not be part of an
3839 ashlM3, lshrM3 or ashrM3 instruction. */
3840 if (SHIFT_COUNT_TRUNCATED)
3841 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3842 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3843 return 0;
3845 val = (code == ASHIFT
3846 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3847 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3849 /* Sign-extend the result for arithmetic right shifts. */
3850 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3851 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3852 break;
3854 case ROTATERT:
3855 if (arg1 < 0)
3856 return 0;
3858 arg1 %= width;
3859 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3860 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3861 break;
3863 case ROTATE:
3864 if (arg1 < 0)
3865 return 0;
3867 arg1 %= width;
3868 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3869 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3870 break;
3872 case COMPARE:
3873 /* Do nothing here. */
3874 return 0;
3876 case SMIN:
3877 val = arg0s <= arg1s ? arg0s : arg1s;
3878 break;
3880 case UMIN:
3881 val = ((unsigned HOST_WIDE_INT) arg0
3882 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3883 break;
3885 case SMAX:
3886 val = arg0s > arg1s ? arg0s : arg1s;
3887 break;
3889 case UMAX:
3890 val = ((unsigned HOST_WIDE_INT) arg0
3891 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3892 break;
3894 case SS_PLUS:
3895 case US_PLUS:
3896 case SS_MINUS:
3897 case US_MINUS:
3898 case SS_MULT:
3899 case US_MULT:
3900 case SS_DIV:
3901 case US_DIV:
3902 case SS_ASHIFT:
3903 case US_ASHIFT:
3904 /* ??? There are simplifications that can be done. */
3905 return 0;
3907 default:
3908 gcc_unreachable ();
3911 return gen_int_mode (val, mode);
3914 return NULL_RTX;
3919 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3920 PLUS or MINUS.
3922 Rather than test for specific cases, we do this by a brute-force method
3923 and do all possible simplifications until no more changes occur. Then
3924 we rebuild the operation. */
3926 struct simplify_plus_minus_op_data
3928 rtx op;
3929 short neg;
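/* Used by the insertion sort in simplify_plus_minus below: return true
if X should be ordered after Y.  */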
3932 static bool
3933 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3935 int result;
3937 result = (commutative_operand_precedence (y)
3938 - commutative_operand_precedence (x));
3939 if (result)
3940 return result > 0;
3942 /* Group together equal REGs to do more simplification. */
3943 if (REG_P (x) && REG_P (y))
3944 return REGNO (x) > REGNO (y);
3945 else
3946 return false;
3949 static rtx
3950 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3951 rtx op1)
3953 struct simplify_plus_minus_op_data ops[8];
3954 rtx result, tem;
3955 int n_ops = 2, input_ops = 2;
3956 int changed, n_constants = 0, canonicalized = 0;
3957 int i, j;
3959 memset (ops, 0, sizeof ops);
3961 /* Set up the two operands and then expand them until nothing has been
3962 changed. If we run out of room in our array, give up; this should
3963 almost never happen. */
3965 ops[0].op = op0;
3966 ops[0].neg = 0;
3967 ops[1].op = op1;
3968 ops[1].neg = (code == MINUS);
3972 changed = 0;
3974 for (i = 0; i < n_ops; i++)
3976 rtx this_op = ops[i].op;
3977 int this_neg = ops[i].neg;
3978 enum rtx_code this_code = GET_CODE (this_op);
3980 switch (this_code)
3982 case PLUS:
3983 case MINUS:
3984 if (n_ops == 7)
3985 return NULL_RTX;
3987 ops[n_ops].op = XEXP (this_op, 1);
3988 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3989 n_ops++;
3991 ops[i].op = XEXP (this_op, 0);
3992 input_ops++;
3993 changed = 1;
3994 canonicalized |= this_neg;
3995 break;
3997 case NEG:
3998 ops[i].op = XEXP (this_op, 0);
3999 ops[i].neg = ! this_neg;
4000 changed = 1;
4001 canonicalized = 1;
4002 break;
4004 case CONST:
4005 if (n_ops < 7
4006 && GET_CODE (XEXP (this_op, 0)) == PLUS
4007 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4008 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4010 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4011 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4012 ops[n_ops].neg = this_neg;
4013 n_ops++;
4014 changed = 1;
4015 canonicalized = 1;
4017 break;
4019 case NOT:
4020 /* ~a -> (-a - 1) */
4021 if (n_ops != 7)
4023 ops[n_ops].op = CONSTM1_RTX (mode);
4024 ops[n_ops++].neg = this_neg;
4025 ops[i].op = XEXP (this_op, 0);
4026 ops[i].neg = !this_neg;
4027 changed = 1;
4028 canonicalized = 1;
4030 break;
4032 case CONST_INT:
4033 n_constants++;
4034 if (this_neg)
4036 ops[i].op = neg_const_int (mode, this_op);
4037 ops[i].neg = 0;
4038 changed = 1;
4039 canonicalized = 1;
4041 break;
4043 default:
4044 break;
4048 while (changed);
4050 if (n_constants > 1)
4051 canonicalized = 1;
4053 gcc_assert (n_ops >= 2);
4055 /* If we only have two operands, we can avoid the loops. */
4056 if (n_ops == 2)
4058 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4059 rtx lhs, rhs;
4061 /* Get the two operands. Be careful with the order, especially for
4062 the cases where code == MINUS. */
4063 if (ops[0].neg && ops[1].neg)
4065 lhs = gen_rtx_NEG (mode, ops[0].op);
4066 rhs = ops[1].op;
4068 else if (ops[0].neg)
4070 lhs = ops[1].op;
4071 rhs = ops[0].op;
4073 else
4075 lhs = ops[0].op;
4076 rhs = ops[1].op;
4079 return simplify_const_binary_operation (code, mode, lhs, rhs);
4082 /* Now simplify each pair of operands until nothing changes. */
4085 /* Insertion sort is good enough for an eight-element array. */
4086 for (i = 1; i < n_ops; i++)
4088 struct simplify_plus_minus_op_data save;
4089 j = i - 1;
4090 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4091 continue;
4093 canonicalized = 1;
4094 save = ops[i];
4096 ops[j + 1] = ops[j];
4097 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4098 ops[j + 1] = save;
4101 changed = 0;
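/* Try to combine each pair of surviving operands; whenever a pair
simplifies, the result replaces OPS[I] and OPS[J] is cleared.  */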
4102 for (i = n_ops - 1; i > 0; i--)
4103 for (j = i - 1; j >= 0; j--)
4105 rtx lhs = ops[j].op, rhs = ops[i].op;
4106 int lneg = ops[j].neg, rneg = ops[i].neg;
4108 if (lhs != 0 && rhs != 0)
4110 enum rtx_code ncode = PLUS;
4112 if (lneg != rneg)
4114 ncode = MINUS;
4115 if (lneg)
4116 tem = lhs, lhs = rhs, rhs = tem;
4118 else if (swap_commutative_operands_p (lhs, rhs))
4119 tem = lhs, lhs = rhs, rhs = tem;
4121 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4122 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4124 rtx tem_lhs, tem_rhs;
4126 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4127 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4128 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4130 if (tem && !CONSTANT_P (tem))
4131 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4133 else
4134 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4136 /* Reject "simplifications" that just wrap the two
4137 arguments in a CONST. Failure to do so can result
4138 in infinite recursion with simplify_binary_operation
4139 when it calls us to simplify CONST operations. */
4140 if (tem
4141 && ! (GET_CODE (tem) == CONST
4142 && GET_CODE (XEXP (tem, 0)) == ncode
4143 && XEXP (XEXP (tem, 0), 0) == lhs
4144 && XEXP (XEXP (tem, 0), 1) == rhs))
4146 lneg &= rneg;
4147 if (GET_CODE (tem) == NEG)
4148 tem = XEXP (tem, 0), lneg = !lneg;
4149 if (CONST_INT_P (tem) && lneg)
4150 tem = neg_const_int (mode, tem), lneg = 0;
4152 ops[i].op = tem;
4153 ops[i].neg = lneg;
4154 ops[j].op = NULL_RTX;
4155 changed = 1;
4156 canonicalized = 1;
4161 /* If nothing changed, fail. */
4162 if (!canonicalized)
4163 return NULL_RTX;
4165 /* Pack all the operands to the lower-numbered entries. */
4166 for (i = 0, j = 0; j < n_ops; j++)
4167 if (ops[j].op)
4169 ops[i] = ops[j];
4170 i++;
4172 n_ops = i;
4174 while (changed);
4176 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4177 if (n_ops == 2
4178 && CONST_INT_P (ops[1].op)
4179 && CONSTANT_P (ops[0].op)
4180 && ops[0].neg)
4181 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4183 /* We suppressed creation of trivial CONST expressions in the
4184 combination loop to avoid recursion. Create one manually now.
4185 The combination loop should have ensured that there is exactly
4186 one CONST_INT, and the sort will have ensured that it is last
4187 in the array and that any other constant will be next-to-last. */
4189 if (n_ops > 1
4190 && CONST_INT_P (ops[n_ops - 1].op)
4191 && CONSTANT_P (ops[n_ops - 2].op))
4193 rtx value = ops[n_ops - 1].op;
4194 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4195 value = neg_const_int (mode, value);
4196 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4197 INTVAL (value));
4198 n_ops--;
4201 /* Put a non-negated operand first, if possible. */
4203 for (i = 0; i < n_ops && ops[i].neg; i++)
4204 continue;
4205 if (i == n_ops)
4206 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4207 else if (i != 0)
4209 tem = ops[0].op;
4210 ops[0] = ops[i];
4211 ops[i].op = tem;
4212 ops[i].neg = 1;
4215 /* Now make the result by performing the requested operations. */
4216 result = ops[0].op;
4217 for (i = 1; i < n_ops; i++)
4218 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4219 mode, result, ops[i].op);
4221 return result;
4224 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4225 static bool
4226 plus_minus_operand_p (const_rtx x)
4228 return GET_CODE (x) == PLUS
4229 || GET_CODE (x) == MINUS
4230 || (GET_CODE (x) == CONST
4231 && GET_CODE (XEXP (x, 0)) == PLUS
4232 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4233 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4236 /* Like simplify_binary_operation except used for relational operators.
4237 MODE is the mode of the result.  If MODE is VOIDmode, the operands must
4238 not both be VOIDmode as well.
4240 CMP_MODE specifies the mode in which the comparison is done, so it is
4241 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4242 the operands or, if both are VOIDmode, the operands are compared in
4243 "infinite precision". */
4245 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4246 enum machine_mode cmp_mode, rtx op0, rtx op1)
4248 rtx tem, trueop0, trueop1;
4250 if (cmp_mode == VOIDmode)
4251 cmp_mode = GET_MODE (op0);
4252 if (cmp_mode == VOIDmode)
4253 cmp_mode = GET_MODE (op1);
4255 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
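/* If the comparison folded to a constant, scalar float and vector result
modes may need the generic const0_rtx/const_true_rtx translated into their
own representation of false/true.  */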
4256 if (tem)
4258 if (SCALAR_FLOAT_MODE_P (mode))
4260 if (tem == const0_rtx)
4261 return CONST0_RTX (mode);
4262 #ifdef FLOAT_STORE_FLAG_VALUE
4264 REAL_VALUE_TYPE val;
4265 val = FLOAT_STORE_FLAG_VALUE (mode);
4266 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4268 #else
4269 return NULL_RTX;
4270 #endif
4272 if (VECTOR_MODE_P (mode))
4274 if (tem == const0_rtx)
4275 return CONST0_RTX (mode);
4276 #ifdef VECTOR_STORE_FLAG_VALUE
4278 int i, units;
4279 rtvec v;
4281 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4282 if (val == NULL_RTX)
4283 return NULL_RTX;
4284 if (val == const1_rtx)
4285 return CONST1_RTX (mode);
4287 units = GET_MODE_NUNITS (mode);
4288 v = rtvec_alloc (units);
4289 for (i = 0; i < units; i++)
4290 RTVEC_ELT (v, i) = val;
4291 return gen_rtx_raw_CONST_VECTOR (mode, v);
4293 #else
4294 return NULL_RTX;
4295 #endif
4298 return tem;
4301 /* For the following tests, ensure const0_rtx is op1. */
4302 if (swap_commutative_operands_p (op0, op1)
4303 || (op0 == const0_rtx && op1 != const0_rtx))
4304 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4306 /* If op0 is a compare, extract the comparison arguments from it. */
4307 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4308 return simplify_gen_relational (code, mode, VOIDmode,
4309 XEXP (op0, 0), XEXP (op0, 1));
4311 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4312 || CC0_P (op0))
4313 return NULL_RTX;
4315 trueop0 = avoid_constant_pool_reference (op0);
4316 trueop1 = avoid_constant_pool_reference (op1);
4317 return simplify_relational_operation_1 (code, mode, cmp_mode,
4318 trueop0, trueop1);
4321 /* This part of simplify_relational_operation is only used when CMP_MODE
4322 is not in class MODE_CC (i.e. it is a real comparison).
4324 MODE is the mode of the result, while CMP_MODE specifies the mode
4325 in which the comparison is done, so it is the mode of the operands. */
4327 static rtx
4328 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4329 enum machine_mode cmp_mode, rtx op0, rtx op1)
4331 enum rtx_code op0code = GET_CODE (op0);
4333 if (op1 == const0_rtx && COMPARISON_P (op0))
4335 /* If op0 is a comparison, extract the comparison arguments
4336 from it. */
4337 if (code == NE)
4339 if (GET_MODE (op0) == mode)
4340 return simplify_rtx (op0);
4341 else
4342 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4343 XEXP (op0, 0), XEXP (op0, 1));
4345 else if (code == EQ)
4347 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4348 if (new_code != UNKNOWN)
4349 return simplify_gen_relational (new_code, mode, VOIDmode,
4350 XEXP (op0, 0), XEXP (op0, 1));
4354 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4355 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4356 if ((code == LTU || code == GEU)
4357 && GET_CODE (op0) == PLUS
4358 && CONST_INT_P (XEXP (op0, 1))
4359 && (rtx_equal_p (op1, XEXP (op0, 0))
4360 || rtx_equal_p (op1, XEXP (op0, 1))))
4362 rtx new_cmp
4363 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4364 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4365 cmp_mode, XEXP (op0, 0), new_cmp);
4368 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4369 if ((code == LTU || code == GEU)
4370 && GET_CODE (op0) == PLUS
4371 && rtx_equal_p (op1, XEXP (op0, 1))
4372 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4373 && !rtx_equal_p (op1, XEXP (op0, 0)))
4374 return simplify_gen_relational (code, mode, cmp_mode, op0,
4375 copy_rtx (XEXP (op0, 0)));
4377 if (op1 == const0_rtx)
4379 /* Canonicalize (GTU x 0) as (NE x 0). */
4380 if (code == GTU)
4381 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4382 /* Canonicalize (LEU x 0) as (EQ x 0). */
4383 if (code == LEU)
4384 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4386 else if (op1 == const1_rtx)
4388 switch (code)
4390 case GE:
4391 /* Canonicalize (GE x 1) as (GT x 0). */
4392 return simplify_gen_relational (GT, mode, cmp_mode,
4393 op0, const0_rtx);
4394 case GEU:
4395 /* Canonicalize (GEU x 1) as (NE x 0). */
4396 return simplify_gen_relational (NE, mode, cmp_mode,
4397 op0, const0_rtx);
4398 case LT:
4399 /* Canonicalize (LT x 1) as (LE x 0). */
4400 return simplify_gen_relational (LE, mode, cmp_mode,
4401 op0, const0_rtx);
4402 case LTU:
4403 /* Canonicalize (LTU x 1) as (EQ x 0). */
4404 return simplify_gen_relational (EQ, mode, cmp_mode,
4405 op0, const0_rtx);
4406 default:
4407 break;
4410 else if (op1 == constm1_rtx)
4412 /* Canonicalize (LE x -1) as (LT x 0). */
4413 if (code == LE)
4414 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4415 /* Canonicalize (GT x -1) as (GE x 0). */
4416 if (code == GT)
4417 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4420 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4421 if ((code == EQ || code == NE)
4422 && (op0code == PLUS || op0code == MINUS)
4423 && CONSTANT_P (op1)
4424 && CONSTANT_P (XEXP (op0, 1))
4425 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4427 rtx x = XEXP (op0, 0);
4428 rtx c = XEXP (op0, 1);
4429 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4430 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4432 /* Detect an infinite recursion, where we would oscillate in this
4433 simplification case between:
4434 A + B == C <---> C - B == A,
4435 where A, B, and C are all constants with non-simplifiable expressions,
4436 usually SYMBOL_REFs. */
4437 if (GET_CODE (tem) == invcode
4438 && CONSTANT_P (x)
4439 && rtx_equal_p (c, XEXP (tem, 1)))
4440 return NULL_RTX;
4442 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4445 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4446 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4447 if (code == NE
4448 && op1 == const0_rtx
4449 && GET_MODE_CLASS (mode) == MODE_INT
4450 && cmp_mode != VOIDmode
4451 /* ??? Work-around BImode bugs in the ia64 backend. */
4452 && mode != BImode
4453 && cmp_mode != BImode
4454 && nonzero_bits (op0, cmp_mode) == 1
4455 && STORE_FLAG_VALUE == 1)
4456 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4457 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4458 : lowpart_subreg (mode, op0, cmp_mode);
4460 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4461 if ((code == EQ || code == NE)
4462 && op1 == const0_rtx
4463 && op0code == XOR)
4464 return simplify_gen_relational (code, mode, cmp_mode,
4465 XEXP (op0, 0), XEXP (op0, 1));
4467 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4468 if ((code == EQ || code == NE)
4469 && op0code == XOR
4470 && rtx_equal_p (XEXP (op0, 0), op1)
4471 && !side_effects_p (XEXP (op0, 0)))
4472 return simplify_gen_relational (code, mode, cmp_mode,
4473 XEXP (op0, 1), const0_rtx);
4475 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4476 if ((code == EQ || code == NE)
4477 && op0code == XOR
4478 && rtx_equal_p (XEXP (op0, 1), op1)
4479 && !side_effects_p (XEXP (op0, 1)))
4480 return simplify_gen_relational (code, mode, cmp_mode,
4481 XEXP (op0, 0), const0_rtx);
4483 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4484 if ((code == EQ || code == NE)
4485 && op0code == XOR
4486 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4487 && (CONST_INT_P (XEXP (op0, 1))
4488 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4489 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4490 simplify_gen_binary (XOR, cmp_mode,
4491 XEXP (op0, 1), op1));
4493 if (op0code == POPCOUNT && op1 == const0_rtx)
4494 switch (code)
4496 case EQ:
4497 case LE:
4498 case LEU:
4499 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4500 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4501 XEXP (op0, 0), const0_rtx);
4503 case NE:
4504 case GT:
4505 case GTU:
4506 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4507 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4508 XEXP (op0, 0), const0_rtx);
4510 default:
4511 break;
4514 return NULL_RTX;
4517 enum
4519 CMP_EQ = 1,
4520 CMP_LT = 2,
4521 CMP_GT = 4,
4522 CMP_LTU = 8,
4523 CMP_GTU = 16
4527 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4528 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4529 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4530 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4531 For floating-point comparisons, assume that the operands were ordered. */
4533 static rtx
4534 comparison_result (enum rtx_code code, int known_results)
4536 switch (code)
4538 case EQ:
4539 case UNEQ:
4540 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4541 case NE:
4542 case LTGT:
4543 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4545 case LT:
4546 case UNLT:
4547 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4548 case GE:
4549 case UNGE:
4550 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4552 case GT:
4553 case UNGT:
4554 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4555 case LE:
4556 case UNLE:
4557 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4559 case LTU:
4560 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4561 case GEU:
4562 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4564 case GTU:
4565 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4566 case LEU:
4567 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4569 case ORDERED:
4570 return const_true_rtx;
4571 case UNORDERED:
4572 return const0_rtx;
4573 default:
4574 gcc_unreachable ();
4578 /* Check if the given comparison (done in the given MODE) is actually a
4579 tautology or a contradiction.
4580 If no simplification is possible, this function returns zero.
4581 Otherwise, it returns either const_true_rtx or const0_rtx. */
4584 simplify_const_relational_operation (enum rtx_code code,
4585 enum machine_mode mode,
4586 rtx op0, rtx op1)
4588 rtx tem;
4589 rtx trueop0;
4590 rtx trueop1;
4592 gcc_assert (mode != VOIDmode
4593 || (GET_MODE (op0) == VOIDmode
4594 && GET_MODE (op1) == VOIDmode));
4596 /* If op0 is a compare, extract the comparison arguments from it. */
4597 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4599 op1 = XEXP (op0, 1);
4600 op0 = XEXP (op0, 0);
4602 if (GET_MODE (op0) != VOIDmode)
4603 mode = GET_MODE (op0);
4604 else if (GET_MODE (op1) != VOIDmode)
4605 mode = GET_MODE (op1);
4606 else
4607 return 0;
4610 /* We can't simplify MODE_CC values since we don't know what the
4611 actual comparison is. */
4612 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4613 return 0;
4615 /* Make sure the constant is second. */
4616 if (swap_commutative_operands_p (op0, op1))
4618 tem = op0, op0 = op1, op1 = tem;
4619 code = swap_condition (code);
4622 trueop0 = avoid_constant_pool_reference (op0);
4623 trueop1 = avoid_constant_pool_reference (op1);
4625 /* For integer comparisons of A and B maybe we can simplify A - B and can
4626 then simplify a comparison of that with zero. If A and B are both either
4627 a register or a CONST_INT, this can't help; testing for these cases will
4628 prevent infinite recursion here and speed things up.
4630 We can only do this for EQ and NE comparisons, as otherwise we may
4631 lose or introduce overflow, which we cannot disregard as undefined
4632 because we do not know the signedness of the operation on either the
4633 left or the right hand side of the comparison. */
4635 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4636 && (code == EQ || code == NE)
4637 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4638 && (REG_P (op1) || CONST_INT_P (trueop1)))
4639 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4640 /* We cannot do this if tem is a nonzero address. */
4641 && ! nonzero_address_p (tem))
4642 return simplify_const_relational_operation (signed_condition (code),
4643 mode, tem, const0_rtx);
4645 if (! HONOR_NANS (mode) && code == ORDERED)
4646 return const_true_rtx;
4648 if (! HONOR_NANS (mode) && code == UNORDERED)
4649 return const0_rtx;
4651 /* For modes without NaNs, if the two operands are equal, we know the
4652 result except if they have side-effects. Even with NaNs we know
4653 the result of unordered comparisons and, if signaling NaNs are
4654 irrelevant, also the result of LT/GT/LTGT. */
4655 if ((! HONOR_NANS (GET_MODE (trueop0))
4656 || code == UNEQ || code == UNLE || code == UNGE
4657 || ((code == LT || code == GT || code == LTGT)
4658 && ! HONOR_SNANS (GET_MODE (trueop0))))
4659 && rtx_equal_p (trueop0, trueop1)
4660 && ! side_effects_p (trueop0))
4661 return comparison_result (code, CMP_EQ);
4663 /* If the operands are floating-point constants, see if we can fold
4664 the result. */
4665 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4666 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4667 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4669 REAL_VALUE_TYPE d0, d1;
4671 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4672 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4674 /* Comparisons are unordered iff at least one of the values is NaN. */
4675 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4676 switch (code)
4678 case UNEQ:
4679 case UNLT:
4680 case UNGT:
4681 case UNLE:
4682 case UNGE:
4683 case NE:
4684 case UNORDERED:
4685 return const_true_rtx;
4686 case EQ:
4687 case LT:
4688 case GT:
4689 case LE:
4690 case GE:
4691 case LTGT:
4692 case ORDERED:
4693 return const0_rtx;
4694 default:
4695 return 0;
4698 return comparison_result (code,
4699 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4700 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4703 /* Otherwise, see if the operands are both integers. */
4704 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4705 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4706 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4708 int width = GET_MODE_PRECISION (mode);
4709 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4710 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4712 /* Get the two words comprising each integer constant. */
4713 if (CONST_DOUBLE_AS_INT_P (trueop0))
4715 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4716 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4718 else
4720 l0u = l0s = INTVAL (trueop0);
4721 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4724 if (CONST_DOUBLE_AS_INT_P (trueop1))
4726 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4727 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4729 else
4731 l1u = l1s = INTVAL (trueop1);
4732 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4735 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4736 we have to sign or zero-extend the values. */
4737 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4739 l0u &= GET_MODE_MASK (mode);
4740 l1u &= GET_MODE_MASK (mode);
4742 if (val_signbit_known_set_p (mode, l0s))
4743 l0s |= ~GET_MODE_MASK (mode);
4745 if (val_signbit_known_set_p (mode, l1s))
4746 l1s |= ~GET_MODE_MASK (mode);
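/* When the mode fits in a single HOST_WIDE_INT, the high words carry
nothing beyond the sign extension of the low words.  */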
4748 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4749 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4751 if (h0u == h1u && l0u == l1u)
4752 return comparison_result (code, CMP_EQ);
4753 else
4755 int cr;
4756 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4757 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4758 return comparison_result (code, cr);
4762 /* Optimize comparisons with upper and lower bounds. */
4763 if (HWI_COMPUTABLE_MODE_P (mode)
4764 && CONST_INT_P (trueop1))
4766 int sign;
4767 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4768 HOST_WIDE_INT val = INTVAL (trueop1);
4769 HOST_WIDE_INT mmin, mmax;
4771 if (code == GEU
4772 || code == LEU
4773 || code == GTU
4774 || code == LTU)
4775 sign = 0;
4776 else
4777 sign = 1;
4779 /* Get a reduced range if the sign bit is zero. */
4780 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4782 mmin = 0;
4783 mmax = nonzero;
4785 else
4787 rtx mmin_rtx, mmax_rtx;
4788 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4790 mmin = INTVAL (mmin_rtx);
4791 mmax = INTVAL (mmax_rtx);
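/* If the high bits of TRUEOP0 are known to be copies of its sign bit,
the value fits in a narrower range; shrink the bounds accordingly.  */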
4792 if (sign)
4794 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4796 mmin >>= (sign_copies - 1);
4797 mmax >>= (sign_copies - 1);
4801 switch (code)
4803 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4804 case GEU:
4805 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4806 return const_true_rtx;
4807 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4808 return const0_rtx;
4809 break;
4810 case GE:
4811 if (val <= mmin)
4812 return const_true_rtx;
4813 if (val > mmax)
4814 return const0_rtx;
4815 break;
4817 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4818 case LEU:
4819 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4820 return const_true_rtx;
4821 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4822 return const0_rtx;
4823 break;
4824 case LE:
4825 if (val >= mmax)
4826 return const_true_rtx;
4827 if (val < mmin)
4828 return const0_rtx;
4829 break;
4831 case EQ:
4832 /* x == y is always false for y out of range. */
4833 if (val < mmin || val > mmax)
4834 return const0_rtx;
4835 break;
4837 /* x > y is always false for y >= mmax, always true for y < mmin. */
4838 case GTU:
4839 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4840 return const0_rtx;
4841 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4842 return const_true_rtx;
4843 break;
4844 case GT:
4845 if (val >= mmax)
4846 return const0_rtx;
4847 if (val < mmin)
4848 return const_true_rtx;
4849 break;
4851 /* x < y is always false for y <= mmin, always true for y > mmax. */
4852 case LTU:
4853 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4854 return const0_rtx;
4855 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4856 return const_true_rtx;
4857 break;
4858 case LT:
4859 if (val <= mmin)
4860 return const0_rtx;
4861 if (val > mmax)
4862 return const_true_rtx;
4863 break;
4865 case NE:
4866 /* x != y is always true for y out of range. */
4867 if (val < mmin || val > mmax)
4868 return const_true_rtx;
4869 break;
4871 default:
4872 break;
4876 /* Optimize integer comparisons with zero. */
4877 if (trueop1 == const0_rtx)
4879 /* Some addresses are known to be nonzero. We don't know
4880 their sign, but equality comparisons are known. */
4881 if (nonzero_address_p (trueop0))
4883 if (code == EQ || code == LEU)
4884 return const0_rtx;
4885 if (code == NE || code == GTU)
4886 return const_true_rtx;
4889 /* See if the first operand is an IOR with a constant. If so, we
4890 may be able to determine the result of this comparison. */
4891 if (GET_CODE (op0) == IOR)
4893 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4894 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4896 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4897 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4898 && (UINTVAL (inner_const)
4899 & ((unsigned HOST_WIDE_INT) 1
4900 << sign_bitnum)));
4902 switch (code)
4904 case EQ:
4905 case LEU:
4906 return const0_rtx;
4907 case NE:
4908 case GTU:
4909 return const_true_rtx;
4910 case LT:
4911 case LE:
4912 if (has_sign)
4913 return const_true_rtx;
4914 break;
4915 case GT:
4916 case GE:
4917 if (has_sign)
4918 return const0_rtx;
4919 break;
4920 default:
4921 break;
4927 /* Optimize comparison of ABS with zero. */
4928 if (trueop1 == CONST0_RTX (mode)
4929 && (GET_CODE (trueop0) == ABS
4930 || (GET_CODE (trueop0) == FLOAT_EXTEND
4931 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4933 switch (code)
4935 case LT:
4936 /* Optimize abs(x) < 0.0. */
4937 if (!HONOR_SNANS (mode)
4938 && (!INTEGRAL_MODE_P (mode)
4939 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4941 if (INTEGRAL_MODE_P (mode)
4942 && (issue_strict_overflow_warning
4943 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4944 warning (OPT_Wstrict_overflow,
4945 ("assuming signed overflow does not occur when "
4946 "assuming abs (x) < 0 is false"));
4947 return const0_rtx;
4949 break;
4951 case GE:
4952 /* Optimize abs(x) >= 0.0. */
4953 if (!HONOR_NANS (mode)
4954 && (!INTEGRAL_MODE_P (mode)
4955 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4957 if (INTEGRAL_MODE_P (mode)
4958 && (issue_strict_overflow_warning
4959 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4960 warning (OPT_Wstrict_overflow,
4961 ("assuming signed overflow does not occur when "
4962 "assuming abs (x) >= 0 is true"));
4963 return const_true_rtx;
4965 break;
4967 case UNGE:
4968 /* Optimize ! (abs(x) < 0.0). */
4969 return const_true_rtx;
4971 default:
4972 break;
4976 return 0;
4979 /* Simplify CODE, an operation with result mode MODE and three operands,
4980 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4981 a constant.  Return 0 if no simplification is possible. */
4984 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4985 enum machine_mode op0_mode, rtx op0, rtx op1,
4986 rtx op2)
4988 unsigned int width = GET_MODE_PRECISION (mode);
4989 bool any_change = false;
4990 rtx tem;
4992 /* VOIDmode means "infinite" precision. */
4993 if (width == 0)
4994 width = HOST_BITS_PER_WIDE_INT;
4996 switch (code)
4998 case FMA:
4999 /* Simplify negations around the multiplication. */
5000 /* -a * -b + c => a * b + c. */
5001 if (GET_CODE (op0) == NEG)
5003 tem = simplify_unary_operation (NEG, mode, op1, mode);
5004 if (tem)
5005 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5007 else if (GET_CODE (op1) == NEG)
5009 tem = simplify_unary_operation (NEG, mode, op0, mode);
5010 if (tem)
5011 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5014 /* Canonicalize the two multiplication operands. */
5015 /* a * -b + c => -b * a + c. */
5016 if (swap_commutative_operands_p (op0, op1))
5017 tem = op0, op0 = op1, op1 = tem, any_change = true;
5019 if (any_change)
5020 return gen_rtx_FMA (mode, op0, op1, op2);
5021 return NULL_RTX;
5023 case SIGN_EXTRACT:
5024 case ZERO_EXTRACT:
5025 if (CONST_INT_P (op0)
5026 && CONST_INT_P (op1)
5027 && CONST_INT_P (op2)
5028 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5029 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5031 /* Extracting a bit-field from a constant */
5032 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5033 HOST_WIDE_INT op1val = INTVAL (op1);
5034 HOST_WIDE_INT op2val = INTVAL (op2);
5035 if (BITS_BIG_ENDIAN)
5036 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5037 else
5038 val >>= op2val;
5040 if (HOST_BITS_PER_WIDE_INT != op1val)
5042 /* First zero-extend. */
5043 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5044 /* If desired, propagate sign bit. */
5045 if (code == SIGN_EXTRACT
5046 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5047 != 0)
5048 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5051 return gen_int_mode (val, mode);
5053 break;
5055 case IF_THEN_ELSE:
5056 if (CONST_INT_P (op0))
5057 return op0 != const0_rtx ? op1 : op2;
5059 /* Convert c ? a : a into "a". */
5060 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5061 return op1;
5063 /* Convert a != b ? a : b into "a". */
5064 if (GET_CODE (op0) == NE
5065 && ! side_effects_p (op0)
5066 && ! HONOR_NANS (mode)
5067 && ! HONOR_SIGNED_ZEROS (mode)
5068 && ((rtx_equal_p (XEXP (op0, 0), op1)
5069 && rtx_equal_p (XEXP (op0, 1), op2))
5070 || (rtx_equal_p (XEXP (op0, 0), op2)
5071 && rtx_equal_p (XEXP (op0, 1), op1))))
5072 return op1;
5074 /* Convert a == b ? a : b into "b". */
5075 if (GET_CODE (op0) == EQ
5076 && ! side_effects_p (op0)
5077 && ! HONOR_NANS (mode)
5078 && ! HONOR_SIGNED_ZEROS (mode)
5079 && ((rtx_equal_p (XEXP (op0, 0), op1)
5080 && rtx_equal_p (XEXP (op0, 1), op2))
5081 || (rtx_equal_p (XEXP (op0, 0), op2)
5082 && rtx_equal_p (XEXP (op0, 1), op1))))
5083 return op2;
5085 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5087 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5088 ? GET_MODE (XEXP (op0, 1))
5089 : GET_MODE (XEXP (op0, 0)));
5090 rtx temp;
5092 /* Look for happy constants in op1 and op2. */
5093 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5095 HOST_WIDE_INT t = INTVAL (op1);
5096 HOST_WIDE_INT f = INTVAL (op2);
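/* (if_then_else C STORE_FLAG_VALUE 0) is just the comparison C itself,
and (if_then_else C 0 STORE_FLAG_VALUE) is its reverse.  */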
5098 if (t == STORE_FLAG_VALUE && f == 0)
5099 code = GET_CODE (op0);
5100 else if (t == 0 && f == STORE_FLAG_VALUE)
5102 enum rtx_code tmp;
5103 tmp = reversed_comparison_code (op0, NULL_RTX);
5104 if (tmp == UNKNOWN)
5105 break;
5106 code = tmp;
5108 else
5109 break;
5111 return simplify_gen_relational (code, mode, cmp_mode,
5112 XEXP (op0, 0), XEXP (op0, 1));
5115 if (cmp_mode == VOIDmode)
5116 cmp_mode = op0_mode;
5117 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5118 cmp_mode, XEXP (op0, 0),
5119 XEXP (op0, 1));
5121 /* See if any simplifications were possible. */
5122 if (temp)
5124 if (CONST_INT_P (temp))
5125 return temp == const0_rtx ? op2 : op1;
5126 else if (temp)
5127 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5130 break;
5132 case VEC_MERGE:
5133 gcc_assert (GET_MODE (op0) == mode);
5134 gcc_assert (GET_MODE (op1) == mode);
5135 gcc_assert (VECTOR_MODE_P (mode));
5136 op2 = avoid_constant_pool_reference (op2);
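/* A constant OP2 is a bitmask selecting the elements: element I of the
result comes from OP0 when bit I is set and from OP1 otherwise.  */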
5137 if (CONST_INT_P (op2))
5139 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5140 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5141 int mask = (1 << n_elts) - 1;
5143 if (!(INTVAL (op2) & mask))
5144 return op1;
5145 if ((INTVAL (op2) & mask) == mask)
5146 return op0;
5148 op0 = avoid_constant_pool_reference (op0);
5149 op1 = avoid_constant_pool_reference (op1);
5150 if (GET_CODE (op0) == CONST_VECTOR
5151 && GET_CODE (op1) == CONST_VECTOR)
5153 rtvec v = rtvec_alloc (n_elts);
5154 unsigned int i;
5156 for (i = 0; i < n_elts; i++)
5157 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5158 ? CONST_VECTOR_ELT (op0, i)
5159 : CONST_VECTOR_ELT (op1, i));
5160 return gen_rtx_CONST_VECTOR (mode, v);
5163 break;
5165 default:
5166 gcc_unreachable ();
5169 return 0;
5172 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5173 or CONST_VECTOR,
5174 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5176 Works by unpacking OP into a collection of 8-bit values
5177 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5178 and then repacking them again for OUTERMODE. */
5180 static rtx
5181 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5182 enum machine_mode innermode, unsigned int byte)
5184 /* We support up to 512-bit values (for V8DFmode). */
5185 enum {
5186 max_bitsize = 512,
5187 value_bit = 8,
5188 value_mask = (1 << value_bit) - 1
5190 unsigned char value[max_bitsize / value_bit];
5191 int value_start;
5192 int i;
5193 int elem;
5195 int num_elem;
5196 rtx * elems;
5197 int elem_bitsize;
5198 rtx result_s;
5199 rtvec result_v = NULL;
5200 enum mode_class outer_class;
5201 enum machine_mode outer_submode;
5203 /* Some ports misuse CCmode. */
5204 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5205 return op;
5207 /* We have no way to represent a complex constant at the rtl level. */
5208 if (COMPLEX_MODE_P (outermode))
5209 return NULL_RTX;
5211 /* Unpack the value. */
5213 if (GET_CODE (op) == CONST_VECTOR)
5215 num_elem = CONST_VECTOR_NUNITS (op);
5216 elems = &CONST_VECTOR_ELT (op, 0);
5217 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5219 else
5221 num_elem = 1;
5222 elems = &op;
5223 elem_bitsize = max_bitsize;
5225 /* If this asserts, it is too complicated; reducing value_bit may help. */
5226 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5227 /* I don't know how to handle endianness of sub-units. */
5228 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
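/* The VALUE array holds the constant as VALUE_BIT-sized chunks in
little-endian order regardless of target endianness; the loops below
translate between that layout and the target's ordering of bytes and
words.  */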
5230 for (elem = 0; elem < num_elem; elem++)
5232 unsigned char * vp;
5233 rtx el = elems[elem];
5235 /* Vectors are kept in target memory order. (This is probably
5236 a mistake.) */
5238 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5239 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5240 / BITS_PER_UNIT);
5241 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5242 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5243 unsigned bytele = (subword_byte % UNITS_PER_WORD
5244 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5245 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5248 switch (GET_CODE (el))
5250 case CONST_INT:
5251 for (i = 0;
5252 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5253 i += value_bit)
5254 *vp++ = INTVAL (el) >> i;
5255 /* CONST_INTs are always logically sign-extended. */
5256 for (; i < elem_bitsize; i += value_bit)
5257 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5258 break;
5260 case CONST_DOUBLE:
5261 if (GET_MODE (el) == VOIDmode)
5263 unsigned char extend = 0;
5264 /* If this triggers, someone should have generated a
5265 CONST_INT instead. */
5266 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5268 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5269 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5270 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5272 *vp++
5273 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5274 i += value_bit;
5277 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5278 extend = -1;
5279 for (; i < elem_bitsize; i += value_bit)
5280 *vp++ = extend;
5282 else
5284 long tmp[max_bitsize / 32];
5285 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5287 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5288 gcc_assert (bitsize <= elem_bitsize);
5289 gcc_assert (bitsize % value_bit == 0);
5291 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5292 GET_MODE (el));
5294 /* real_to_target produces its result in words affected by
5295 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5296 and use WORDS_BIG_ENDIAN instead; see the documentation
5297 of SUBREG in rtl.texi. */
5298 for (i = 0; i < bitsize; i += value_bit)
5300 int ibase;
5301 if (WORDS_BIG_ENDIAN)
5302 ibase = bitsize - 1 - i;
5303 else
5304 ibase = i;
5305 *vp++ = tmp[ibase / 32] >> i % 32;
5308 /* It shouldn't matter what's done here, so fill it with
5309 zero. */
5310 for (; i < elem_bitsize; i += value_bit)
5311 *vp++ = 0;
5313 break;
5315 case CONST_FIXED:
5316 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5318 for (i = 0; i < elem_bitsize; i += value_bit)
5319 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5321 else
5323 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5324 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5325 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5326 i += value_bit)
5327 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5328 >> (i - HOST_BITS_PER_WIDE_INT);
5329 for (; i < elem_bitsize; i += value_bit)
5330 *vp++ = 0;
5332 break;
5334 default:
5335 gcc_unreachable ();
5339 /* Now, pick the right byte to start with. */
5340 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5341 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5342 will already have offset 0. */
5343 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5345 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5346 - byte);
5347 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5348 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5349 byte = (subword_byte % UNITS_PER_WORD
5350 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5353 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5354 so if it's become negative it will instead be very large.) */
5355 gcc_assert (byte < GET_MODE_SIZE (innermode));
5357 /* Convert from bytes to chunks of size value_bit. */
5358 value_start = byte * (BITS_PER_UNIT / value_bit);
5360 /* Re-pack the value. */
5362 if (VECTOR_MODE_P (outermode))
5364 num_elem = GET_MODE_NUNITS (outermode);
5365 result_v = rtvec_alloc (num_elem);
5366 elems = &RTVEC_ELT (result_v, 0);
5367 outer_submode = GET_MODE_INNER (outermode);
5369 else
5371 num_elem = 1;
5372 elems = &result_s;
5373 outer_submode = outermode;
5376 outer_class = GET_MODE_CLASS (outer_submode);
5377 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5379 gcc_assert (elem_bitsize % value_bit == 0);
5380 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5382 for (elem = 0; elem < num_elem; elem++)
5384 unsigned char *vp;
5386 /* Vectors are stored in target memory order. (This is probably
5387 a mistake.) */
5389 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5390 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5391 / BITS_PER_UNIT);
5392 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5393 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5394 unsigned bytele = (subword_byte % UNITS_PER_WORD
5395 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5396 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5399 switch (outer_class)
5401 case MODE_INT:
5402 case MODE_PARTIAL_INT:
5404 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5406 for (i = 0;
5407 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5408 i += value_bit)
5409 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5410 for (; i < elem_bitsize; i += value_bit)
5411 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5412 << (i - HOST_BITS_PER_WIDE_INT);
5414 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5415 know why. */
5416 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5417 elems[elem] = gen_int_mode (lo, outer_submode);
5418 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5419 elems[elem] = immed_double_const (lo, hi, outer_submode);
5420 else
5421 return NULL_RTX;
5423 break;
5425 case MODE_FLOAT:
5426 case MODE_DECIMAL_FLOAT:
5428 REAL_VALUE_TYPE r;
5429 long tmp[max_bitsize / 32];
5431 /* real_from_target wants its input in words affected by
5432 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5433 and use WORDS_BIG_ENDIAN instead; see the documentation
5434 of SUBREG in rtl.texi. */
5435 for (i = 0; i < max_bitsize / 32; i++)
5436 tmp[i] = 0;
5437 for (i = 0; i < elem_bitsize; i += value_bit)
5439 int ibase;
5440 if (WORDS_BIG_ENDIAN)
5441 ibase = elem_bitsize - 1 - i;
5442 else
5443 ibase = i;
5444 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5447 real_from_target (&r, tmp, outer_submode);
5448 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5450 break;
5452 case MODE_FRACT:
5453 case MODE_UFRACT:
5454 case MODE_ACCUM:
5455 case MODE_UACCUM:
5457 FIXED_VALUE_TYPE f;
5458 f.data.low = 0;
5459 f.data.high = 0;
5460 f.mode = outer_submode;
5462 for (i = 0;
5463 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5464 i += value_bit)
5465 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5466 for (; i < elem_bitsize; i += value_bit)
5467 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5468 << (i - HOST_BITS_PER_WIDE_INT));
5470 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5472 break;
5474 default:
5475 gcc_unreachable ();
5478 if (VECTOR_MODE_P (outermode))
5479 return gen_rtx_CONST_VECTOR (outermode, result_v);
5480 else
5481 return result_s;
5484 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
5485 Return 0 if no simplifications are possible. */
5487 simplify_subreg (enum machine_mode outermode, rtx op,
5488 enum machine_mode innermode, unsigned int byte)
5490 /* Little bit of sanity checking. */
5491 gcc_assert (innermode != VOIDmode);
5492 gcc_assert (outermode != VOIDmode);
5493 gcc_assert (innermode != BLKmode);
5494 gcc_assert (outermode != BLKmode);
5496 gcc_assert (GET_MODE (op) == innermode
5497 || GET_MODE (op) == VOIDmode);
5499 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5500 gcc_assert (byte < GET_MODE_SIZE (innermode));
5502 if (outermode == innermode && !byte)
5503 return op;
5505 if (CONST_INT_P (op)
5506 || CONST_DOUBLE_P (op)
5507 || GET_CODE (op) == CONST_FIXED
5508 || GET_CODE (op) == CONST_VECTOR)
5509 return simplify_immed_subreg (outermode, op, innermode, byte);
5511 /* Changing mode twice with SUBREG => just change it once,
5512 or not at all if changing back to OP's starting mode. */
5513 if (GET_CODE (op) == SUBREG)
5515 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5516 int final_offset = byte + SUBREG_BYTE (op);
5517 rtx newx;
5519 if (outermode == innermostmode
5520 && byte == 0 && SUBREG_BYTE (op) == 0)
5521 return SUBREG_REG (op);
5523 /* SUBREG_BYTE represents an offset, as if the value were stored
5524 in memory.  The irritating exception is a paradoxical subreg, where
5525 we define SUBREG_BYTE to be 0; on big-endian machines this value
5526 would really be negative.  For a moment, undo this exception. */
5527 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5529 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5530 if (WORDS_BIG_ENDIAN)
5531 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5532 if (BYTES_BIG_ENDIAN)
5533 final_offset += difference % UNITS_PER_WORD;
5535 if (SUBREG_BYTE (op) == 0
5536 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5538 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5539 if (WORDS_BIG_ENDIAN)
5540 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5541 if (BYTES_BIG_ENDIAN)
5542 final_offset += difference % UNITS_PER_WORD;
5545 /* See whether resulting subreg will be paradoxical. */
5546 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5548 /* In nonparadoxical subregs we can't handle negative offsets. */
5549 if (final_offset < 0)
5550 return NULL_RTX;
5551 /* Bail out in case resulting subreg would be incorrect. */
5552 if (final_offset % GET_MODE_SIZE (outermode)
5553 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5554 return NULL_RTX;
5556 else
5558 int offset = 0;
5559 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5561 /* In a paradoxical subreg, see if we are still looking at the lower part.
5562 If so, our SUBREG_BYTE will be 0. */
5563 if (WORDS_BIG_ENDIAN)
5564 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5565 if (BYTES_BIG_ENDIAN)
5566 offset += difference % UNITS_PER_WORD;
5567 if (offset == final_offset)
5568 final_offset = 0;
5569 else
5570 return NULL_RTX;
5573 /* Recurse for further possible simplifications. */
5574 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5575 final_offset);
5576 if (newx)
5577 return newx;
5578 if (validate_subreg (outermode, innermostmode,
5579 SUBREG_REG (op), final_offset))
5581 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5582 if (SUBREG_PROMOTED_VAR_P (op)
5583 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5584 && GET_MODE_CLASS (outermode) == MODE_INT
5585 && IN_RANGE (GET_MODE_SIZE (outermode),
5586 GET_MODE_SIZE (innermode),
5587 GET_MODE_SIZE (innermostmode))
5588 && subreg_lowpart_p (newx))
5590 SUBREG_PROMOTED_VAR_P (newx) = 1;
5591 SUBREG_PROMOTED_UNSIGNED_SET
5592 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5594 return newx;
5596 return NULL_RTX;
5599 /* Merge implicit and explicit truncations. */
5601 if (GET_CODE (op) == TRUNCATE
5602 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5603 && subreg_lowpart_offset (outermode, innermode) == byte)
5604 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5605 GET_MODE (XEXP (op, 0)));
5607 /* SUBREG of a hard register => just change the register number
5608 and/or mode. If the hard register is not valid in that mode,
5609 suppress this simplification. If the hard register is the stack,
5610 frame, or argument pointer, leave this as a SUBREG. */
5612 if (REG_P (op) && HARD_REGISTER_P (op))
5614 unsigned int regno, final_regno;
5616 regno = REGNO (op);
5617 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5618 if (HARD_REGISTER_NUM_P (final_regno))
5620 rtx x;
5621 int final_offset = byte;
5623 /* Adjust offset for paradoxical subregs. */
5624 if (byte == 0
5625 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5627 int difference = (GET_MODE_SIZE (innermode)
5628 - GET_MODE_SIZE (outermode));
5629 if (WORDS_BIG_ENDIAN)
5630 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5631 if (BYTES_BIG_ENDIAN)
5632 final_offset += difference % UNITS_PER_WORD;
5635 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5637 /* Propagate the original regno.  We don't have any way to specify
5638 the offset inside the original regno, so do so only for the lowpart.
5639 The information is used only by alias analysis, which cannot
5640 grok partial registers anyway. */
5642 if (subreg_lowpart_offset (outermode, innermode) == byte)
5643 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5644 return x;
5648 /* If we have a SUBREG of a register that we are replacing and we are
5649 replacing it with a MEM, make a new MEM and try replacing the
5650 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5651 or if we would be widening it. */
5653 if (MEM_P (op)
5654 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5655 /* Allow splitting of volatile memory references in case we don't
5656 have an instruction to move the whole thing. */
5657 && (! MEM_VOLATILE_P (op)
5658 || ! have_insn_for (SET, innermode))
5659 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5660 return adjust_address_nv (op, outermode, byte);
5662 /* Handle complex values represented as CONCAT
5663 of real and imaginary part. */
5664 if (GET_CODE (op) == CONCAT)
5666 unsigned int part_size, final_offset;
5667 rtx part, res;
5669 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5670 if (byte < part_size)
5672 part = XEXP (op, 0);
5673 final_offset = byte;
5675 else
5677 part = XEXP (op, 1);
5678 final_offset = byte - part_size;
5681 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5682 return NULL_RTX;
5684 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5685 if (res)
5686 return res;
5687 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5688 return gen_rtx_SUBREG (outermode, part, final_offset);
5689 return NULL_RTX;
5692 /* Optimize SUBREG truncations of zero and sign extended values. */
5693 if ((GET_CODE (op) == ZERO_EXTEND
5694 || GET_CODE (op) == SIGN_EXTEND)
5695 && SCALAR_INT_MODE_P (innermode)
5696 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5698 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5700 /* If we're requesting the lowpart of a zero or sign extension,
5701 there are three possibilities. If the outermode is the same
5702 as the origmode, we can omit both the extension and the subreg.
5703 If the outermode is not larger than the origmode, we can apply
5704 the truncation without the extension. Finally, if the outermode
5705 is larger than the origmode, but both are integer modes, we
5706 can just extend to the appropriate mode. */
5707 if (bitpos == 0)
5709 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5710 if (outermode == origmode)
5711 return XEXP (op, 0);
5712 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5713 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5714 subreg_lowpart_offset (outermode,
5715 origmode));
5716 if (SCALAR_INT_MODE_P (outermode))
5717 return simplify_gen_unary (GET_CODE (op), outermode,
5718 XEXP (op, 0), origmode);
5721 /* A SUBREG resulting from a zero extension may fold to zero if
5722 it extracts higher bits than the ZERO_EXTEND's source provides. */
5723 if (GET_CODE (op) == ZERO_EXTEND
5724 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5725 return CONST0_RTX (outermode);
5728 /* Simplify (subreg:SI (op:DI ((x:DI) (y:DI)), 0)
5729 to (op:SI (subreg:SI (x:DI) 0) (subreg:SI (y:DI) 0)), where
5730 the outer subreg is effectively a truncation to the original mode. */
5731 if ((GET_CODE (op) == PLUS
5732 || GET_CODE (op) == MINUS
5733 || GET_CODE (op) == MULT)
5734 && SCALAR_INT_MODE_P (outermode)
5735 && SCALAR_INT_MODE_P (innermode)
5736 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5737 && byte == subreg_lowpart_offset (outermode, innermode))
5739 rtx op0 = simplify_gen_subreg (outermode, XEXP (op, 0),
5740 innermode, byte);
5741 if (op0)
5743 rtx op1 = simplify_gen_subreg (outermode, XEXP (op, 1),
5744 innermode, byte);
5745 if (op1)
5746 return simplify_gen_binary (GET_CODE (op), outermode, op0, op1);
5750 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5751 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5752 the outer subreg is effectively a truncation to the original mode. */
5753 if ((GET_CODE (op) == LSHIFTRT
5754 || GET_CODE (op) == ASHIFTRT)
5755 && SCALAR_INT_MODE_P (outermode)
5756 && SCALAR_INT_MODE_P (innermode)
5757 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5758 to avoid the possibility that an outer LSHIFTRT shifts by more
5759 than the sign extension's sign_bit_copies and introduces zeros
5760 into the high bits of the result. */
5761 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5762 && CONST_INT_P (XEXP (op, 1))
5763 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5764 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5765 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5766 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5767 return simplify_gen_binary (ASHIFTRT, outermode,
5768 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5770 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5771 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5772 the outer subreg is effectively a truncation to the original mode. */
5773 if ((GET_CODE (op) == LSHIFTRT
5774 || GET_CODE (op) == ASHIFTRT)
5775 && SCALAR_INT_MODE_P (outermode)
5776 && SCALAR_INT_MODE_P (innermode)
5777 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5778 && CONST_INT_P (XEXP (op, 1))
5779 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5781 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5782 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5783 return simplify_gen_binary (LSHIFTRT, outermode,
5784 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5786 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5787 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5788 the outer subreg is effectively a truncation to the original mode. */
5789 if (GET_CODE (op) == ASHIFT
5790 && SCALAR_INT_MODE_P (outermode)
5791 && SCALAR_INT_MODE_P (innermode)
5792 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5793 && CONST_INT_P (XEXP (op, 1))
5794 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5795 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5796 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5797 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5798 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5799 return simplify_gen_binary (ASHIFT, outermode,
5800 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5802 /* Recognize a word extraction from a multi-word subreg. */
5803 if ((GET_CODE (op) == LSHIFTRT
5804 || GET_CODE (op) == ASHIFTRT)
5805 && SCALAR_INT_MODE_P (innermode)
5806 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5807 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5808 && CONST_INT_P (XEXP (op, 1))
5809 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5810 && INTVAL (XEXP (op, 1)) >= 0
5811 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5812 && byte == subreg_lowpart_offset (outermode, innermode))
5814 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5815 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5816 (WORDS_BIG_ENDIAN
5817 ? byte - shifted_bytes
5818 : byte + shifted_bytes));
5821 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5822 and try replacing the SUBREG and shift with it. Don't do this if
5823 the MEM has a mode-dependent address or if we would be widening it. */
5825 if ((GET_CODE (op) == LSHIFTRT
5826 || GET_CODE (op) == ASHIFTRT)
5827 && SCALAR_INT_MODE_P (innermode)
5828 && MEM_P (XEXP (op, 0))
5829 && CONST_INT_P (XEXP (op, 1))
5830 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5831 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5832 && INTVAL (XEXP (op, 1)) > 0
5833 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5834 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
5835 MEM_ADDR_SPACE (XEXP (op, 0)))
5836 && ! MEM_VOLATILE_P (XEXP (op, 0))
5837 && byte == subreg_lowpart_offset (outermode, innermode)
5838 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5839 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5840 {
5841 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5842 return adjust_address_nv (XEXP (op, 0), outermode,
5843 (WORDS_BIG_ENDIAN
5844 ? byte - shifted_bytes
5845 : byte + shifted_bytes));
5846 }
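/* For illustration, under the same 32-bit little-endian assumptions,
   a lowpart SImode subreg of (lshiftrt:DI (mem:DI addr) (const_int 32))
   becomes, via adjust_address_nv, an SImode MEM at addr + 4: the high
   word is read directly from memory instead of being shifted, provided
   the address is not mode-dependent and the MEM is not volatile.  */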
5848 return NULL_RTX;
5849 }
5851 /* Make a SUBREG operation or equivalent if it folds. */
5853 rtx
5854 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5855 enum machine_mode innermode, unsigned int byte)
5856 {
5857 rtx newx;
5859 newx = simplify_subreg (outermode, op, innermode, byte);
5860 if (newx)
5861 return newx;
5863 if (GET_CODE (op) == SUBREG
5864 || GET_CODE (op) == CONCAT
5865 || GET_MODE (op) == VOIDmode)
5866 return NULL_RTX;
5868 if (validate_subreg (outermode, innermode, op, byte))
5869 return gen_rtx_SUBREG (outermode, op, byte);
5871 return NULL_RTX;
5872 }
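/* Usage sketch (hypothetical caller, not part of this file): to take
   the low SImode half of a DImode value OP while folding whenever
   possible, a pass might write

     rtx lowpart = simplify_gen_subreg (SImode, op, DImode,
                                        subreg_lowpart_offset (SImode,
                                                               DImode));

   For a REG this typically yields a lowpart SUBREG of the register;
   for an integer constant the low part is computed directly; NULL_RTX
   means no valid SUBREG could be formed.  */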
5874 /* Simplify X, an rtx expression.
5876 Return the simplified expression or NULL if no simplifications
5877 were possible.
5879 This is the preferred entry point into the simplification routines;
5880 however, we still allow passes to call the more specific routines.
5882 Right now GCC has three (yes, three) major bodies of RTL simplification
5883 code that need to be unified.
5885 1. fold_rtx in cse.c. This code uses various CSE specific
5886 information to aid in RTL simplification.
5888 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5889 it uses combine specific information to aid in RTL
5890 simplification.
5892 3. The routines in this file.
5895 Long term we want to only have one body of simplification code; to
5896 get to that state I recommend the following steps:
5898 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5899 which do not depend on pass-specific state into these routines.
5901 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5902 use this routine whenever possible.
5904 3. Allow for pass dependent state to be provided to these
5905 routines and add simplifications based on the pass dependent
5906 state. Remove code from cse.c & combine.c that becomes
5907 redundant/dead.
5909 It will take time, but ultimately the compiler will be easier to
5910 maintain and improve. It's totally silly that when we add a
5911 simplification it needs to be added to 4 places (3 for RTL
5912 simplification and 1 for tree simplification). */
5914 rtx
5915 simplify_rtx (const_rtx x)
5916 {
5917 const enum rtx_code code = GET_CODE (x);
5918 const enum machine_mode mode = GET_MODE (x);
5920 switch (GET_RTX_CLASS (code))
5921 {
5922 case RTX_UNARY:
5923 return simplify_unary_operation (code, mode,
5924 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5925 case RTX_COMM_ARITH:
5926 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5927 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5929 /* Fall through.... */
5931 case RTX_BIN_ARITH:
5932 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5934 case RTX_TERNARY:
5935 case RTX_BITFIELD_OPS:
5936 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5937 XEXP (x, 0), XEXP (x, 1),
5938 XEXP (x, 2));
5940 case RTX_COMPARE:
5941 case RTX_COMM_COMPARE:
5942 return simplify_relational_operation (code, mode,
5943 ((GET_MODE (XEXP (x, 0))
5944 != VOIDmode)
5945 ? GET_MODE (XEXP (x, 0))
5946 : GET_MODE (XEXP (x, 1))),
5947 XEXP (x, 0),
5948 XEXP (x, 1));
5950 case RTX_EXTRA:
5951 if (code == SUBREG)
5952 return simplify_subreg (mode, SUBREG_REG (x),
5953 GET_MODE (SUBREG_REG (x)),
5954 SUBREG_BYTE (x));
5955 break;
5957 case RTX_OBJ:
5958 if (code == LO_SUM)
5959 {
5960 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5961 if (GET_CODE (XEXP (x, 0)) == HIGH
5962 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5963 return XEXP (x, 1);
5964 }
5965 break;
5967 default:
5968 break;
5969 }
5970 return NULL;
5971 }
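/* For illustration, hypothetical inputs to simplify_rtx: given
     (plus:SI (const_int 2) (const_int 3))
   the commutative-arithmetic case folds it to (const_int 5); given
     (lo_sum:SI (high:SI (symbol_ref X)) (symbol_ref X))
   the RTX_OBJ case returns (symbol_ref X); and when no simplification
   is known the function returns NULL.  */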