gcc/simplify-rtx.c (official-gcc.git, blob 18f264b0ca96128171c8b8bbb54deee1d3100c64, as of 2011-06-16, Tom de Vries <tom@codesourcery.com>)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
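/* For illustration, assuming a 64-bit HOST_WIDE_INT: the pair for -5 has
   low == 0xfffffffffffffffb, and since that is negative when viewed as a
   signed HOST_WIDE_INT, HWI_SIGN_EXTEND (low) yields (HOST_WIDE_INT) -1,
   i.e. an all-ones high word. For low == 5 the macro yields 0, giving
   the pair (5, 0). */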
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
70 return gen_int_mode (- INTVAL (i), mode);
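/* For example, negating the most negative QImode constant -128 gives +128,
   which does not fit in QImode; gen_int_mode truncates it back to -128.
   That wrap-around is exactly the overflow the comment above refers to. */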
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
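/* For example, for 32-bit SImode this accepts only a constant whose low 32
   bits are 0x80000000, i.e. just the sign bit set; values such as
   0x40000000 or 0xc0000000 are rejected. */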
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
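/* As an illustration, asking for PLUS of (const_int 4) and a pseudo such as
   (reg:SI 60) does not fold; since PLUS is commutative and
   swap_commutative_operands_p prefers constants last, the result is
   (plus:SI (reg:SI 60) (const_int 4)). */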
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
132 avoid_constant_pool_reference (rtx x)
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
140 case MEM:
141 break;
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 REAL_VALUE_TYPE d;
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 return x;
156 default:
157 return x;
160 if (GET_MODE (x) == BLKmode)
161 return x;
163 addr = XEXP (x, 0);
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
197 else
198 return c;
201 return x;
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
209 delegitimize_mem_from_attrs (rtx x)
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
213 if (MEM_P (x)
214 && MEM_EXPR (x)
215 && MEM_OFFSET (x))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
272 if (MEM_P (newx))
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do so if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
301 return x;
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
320 /* Likewise for ternary operations. */
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X; if it returns non-NULL, replace X with its return value and simplify the
355 result. */
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420 case RTX_EXTRA:
421 if (code == SUBREG)
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
431 break;
433 case RTX_OBJ:
434 if (code == MEM)
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
454 break;
456 default:
457 break;
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
474 if (newvec == vec)
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
484 break;
486 case 'e':
487 if (XEXP (x, i))
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
497 break;
499 return newx;
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
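/* For example, with a 32-bit FOO and STORE_FLAG_VALUE == -1:
   (ashiftrt foo 31) is -1 when foo is negative and 0 otherwise, so its
   complement is 0 respectively -1, which is exactly the value of
   (ge foo (const_int 0)) under that STORE_FLAG_VALUE convention. */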
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
595 && GET_CODE (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
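/* For example, (not (ior X Y)) becomes (and (not X) (not Y)) and
   (not (and X Y)) becomes (ior (not X) (not Y)); if only one of the two
   inner NOTs survives simplification, the swap below puts it first. */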
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
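/* Concretely, if X and Y are both +0.0 then (minus X Y) is +0.0 and
   (neg (minus X Y)) is -0.0, whereas the rewritten (minus Y X) is +0.0;
   hence the HONOR_SIGNED_ZEROS guard below. */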
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult (neg A) B).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
695 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
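/* For instance, with 32-bit X and STORE_FLAG_VALUE == 1, (lt x 0) is 1 for
   negative x and 0 otherwise, so its negation is -1 or 0, which is what
   (ashiftrt x 31) produces. With STORE_FLAG_VALUE == -1 the comparison is
   -1 or 0, its negation 1 or 0, which is what (lshiftrt x 31) produces. */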
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
759 break;
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
883 rounding can't happen. */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
896 break;
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
923 break;
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
933 case POPCOUNT:
934 switch (GET_CODE (op))
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
950 default:
951 break;
953 break;
955 case PARITY:
956 switch (GET_CODE (op))
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
973 default:
974 break;
976 break;
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Extending a widening multiplication should be canonicalized to
1004 a wider widening multiplication. */
1005 if (GET_CODE (op) == MULT)
1007 rtx lhs = XEXP (op, 0);
1008 rtx rhs = XEXP (op, 1);
1009 enum rtx_code lcode = GET_CODE (lhs);
1010 enum rtx_code rcode = GET_CODE (rhs);
1012 /* Widening multiplies usually extend both operands, but sometimes
1013 they use a shift to extract a portion of a register. */
1014 if ((lcode == SIGN_EXTEND
1015 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1016 && (rcode == SIGN_EXTEND
1017 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1019 enum machine_mode lmode = GET_MODE (lhs);
1020 enum machine_mode rmode = GET_MODE (rhs);
1021 int bits;
1023 if (lcode == ASHIFTRT)
1024 /* Number of bits not shifted off the end. */
1025 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1026 else /* lcode == SIGN_EXTEND */
1027 /* Size of inner mode. */
1028 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1030 if (rcode == ASHIFTRT)
1031 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1032 else /* rcode == SIGN_EXTEND */
1033 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1035 /* We can only widen multiplies if the result is mathematically
1036 equivalent. I.e. if overflow was impossible. */
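/* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
   (sign_extend:SI y:HI))): each operand contributes 16 significant bits,
   so bits == 32, which does not exceed the 32-bit SImode product, and the
   expression is rewritten as
   (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)). */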
1037 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1038 return simplify_gen_binary
1039 (MULT, mode,
1040 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1041 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1045 /* Check for a sign extension of a subreg of a promoted
1046 variable, where the promotion is sign-extended, and the
1047 target mode is the same as the variable's promotion. */
1048 if (GET_CODE (op) == SUBREG
1049 && SUBREG_PROMOTED_VAR_P (op)
1050 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1051 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1052 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1054 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1055 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1056 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1058 gcc_assert (GET_MODE_BITSIZE (mode)
1059 > GET_MODE_BITSIZE (GET_MODE (op)));
1060 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1061 GET_MODE (XEXP (op, 0)));
1064 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1065 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1066 GET_MODE_BITSIZE (N) - I bits.
1067 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1068 is similarly (zero_extend:M (subreg:O <X>)). */
1069 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1070 && GET_CODE (XEXP (op, 0)) == ASHIFT
1071 && CONST_INT_P (XEXP (op, 1))
1072 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1073 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1075 enum machine_mode tmode
1076 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1077 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1078 gcc_assert (GET_MODE_BITSIZE (mode)
1079 > GET_MODE_BITSIZE (GET_MODE (op)));
1080 if (tmode != BLKmode)
1082 rtx inner =
1083 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1084 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1085 ? SIGN_EXTEND : ZERO_EXTEND,
1086 mode, inner, tmode);
1090 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1091 /* As we do not know which address space the pointer is referring to,
1092 we can do this only if the target does not support different pointer
1093 or address modes depending on the address space. */
1094 if (target_default_pointer_address_modes_p ()
1095 && ! POINTERS_EXTEND_UNSIGNED
1096 && mode == Pmode && GET_MODE (op) == ptr_mode
1097 && (CONSTANT_P (op)
1098 || (GET_CODE (op) == SUBREG
1099 && REG_P (SUBREG_REG (op))
1100 && REG_POINTER (SUBREG_REG (op))
1101 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1102 return convert_memory_address (Pmode, op);
1103 #endif
1104 break;
1106 case ZERO_EXTEND:
1107 /* Check for a zero extension of a subreg of a promoted
1108 variable, where the promotion is zero-extended, and the
1109 target mode is the same as the variable's promotion. */
1110 if (GET_CODE (op) == SUBREG
1111 && SUBREG_PROMOTED_VAR_P (op)
1112 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1113 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1114 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1116 /* Extending a widening multiplication should be canonicalized to
1117 a wider widening multiplication. */
1118 if (GET_CODE (op) == MULT)
1120 rtx lhs = XEXP (op, 0);
1121 rtx rhs = XEXP (op, 1);
1122 enum rtx_code lcode = GET_CODE (lhs);
1123 enum rtx_code rcode = GET_CODE (rhs);
1125 /* Widening multiplies usually extend both operands, but sometimes
1126 they use a shift to extract a portion of a register. */
1127 if ((lcode == ZERO_EXTEND
1128 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1129 && (rcode == ZERO_EXTEND
1130 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1132 enum machine_mode lmode = GET_MODE (lhs);
1133 enum machine_mode rmode = GET_MODE (rhs);
1134 int bits;
1136 if (lcode == LSHIFTRT)
1137 /* Number of bits not shifted off the end. */
1138 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1139 else /* lcode == ZERO_EXTEND */
1140 /* Size of inner mode. */
1141 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1143 if (rcode == LSHIFTRT)
1144 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1145 else /* rcode == ZERO_EXTEND */
1146 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1148 /* We can only widen multiplies if the result is mathematically
1149 equivalent. I.e. if overflow was impossible. */
1150 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1151 return simplify_gen_binary
1152 (MULT, mode,
1153 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1154 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1158 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1159 if (GET_CODE (op) == ZERO_EXTEND)
1160 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1161 GET_MODE (XEXP (op, 0)));
1163 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1164 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1165 GET_MODE_BITSIZE (N) - I bits. */
1166 if (GET_CODE (op) == LSHIFTRT
1167 && GET_CODE (XEXP (op, 0)) == ASHIFT
1168 && CONST_INT_P (XEXP (op, 1))
1169 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1170 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1172 enum machine_mode tmode
1173 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1174 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1175 if (tmode != BLKmode)
1177 rtx inner =
1178 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1179 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1183 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1184 /* As we do not know which address space the pointer is referring to,
1185 we can do this only if the target does not support different pointer
1186 or address modes depending on the address space. */
1187 if (target_default_pointer_address_modes_p ()
1188 && POINTERS_EXTEND_UNSIGNED > 0
1189 && mode == Pmode && GET_MODE (op) == ptr_mode
1190 && (CONSTANT_P (op)
1191 || (GET_CODE (op) == SUBREG
1192 && REG_P (SUBREG_REG (op))
1193 && REG_POINTER (SUBREG_REG (op))
1194 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1195 return convert_memory_address (Pmode, op);
1196 #endif
1197 break;
1199 default:
1200 break;
1203 return 0;
1206 /* Try to compute the value of a unary operation CODE whose output mode is to
1207 be MODE with input operand OP whose mode was originally OP_MODE.
1208 Return zero if the value cannot be computed. */
1210 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1211 rtx op, enum machine_mode op_mode)
1213 unsigned int width = GET_MODE_BITSIZE (mode);
1215 if (code == VEC_DUPLICATE)
1217 gcc_assert (VECTOR_MODE_P (mode));
1218 if (GET_MODE (op) != VOIDmode)
1220 if (!VECTOR_MODE_P (GET_MODE (op)))
1221 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1222 else
1223 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1224 (GET_MODE (op)));
1226 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1227 || GET_CODE (op) == CONST_VECTOR)
1229 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1230 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1231 rtvec v = rtvec_alloc (n_elts);
1232 unsigned int i;
1234 if (GET_CODE (op) != CONST_VECTOR)
1235 for (i = 0; i < n_elts; i++)
1236 RTVEC_ELT (v, i) = op;
1237 else
1239 enum machine_mode inmode = GET_MODE (op);
1240 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1241 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1243 gcc_assert (in_n_elts < n_elts);
1244 gcc_assert ((n_elts % in_n_elts) == 0);
1245 for (i = 0; i < n_elts; i++)
1246 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1248 return gen_rtx_CONST_VECTOR (mode, v);
1252 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1254 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1255 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1256 enum machine_mode opmode = GET_MODE (op);
1257 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1258 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1259 rtvec v = rtvec_alloc (n_elts);
1260 unsigned int i;
1262 gcc_assert (op_n_elts == n_elts);
1263 for (i = 0; i < n_elts; i++)
1265 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1266 CONST_VECTOR_ELT (op, i),
1267 GET_MODE_INNER (opmode));
1268 if (!x)
1269 return 0;
1270 RTVEC_ELT (v, i) = x;
1272 return gen_rtx_CONST_VECTOR (mode, v);
1275 /* The order of these tests is critical so that, for example, we don't
1276 check the wrong mode (input vs. output) for a conversion operation,
1277 such as FIX. At some point, this should be simplified. */
1279 if (code == FLOAT && GET_MODE (op) == VOIDmode
1280 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1282 HOST_WIDE_INT hv, lv;
1283 REAL_VALUE_TYPE d;
1285 if (CONST_INT_P (op))
1286 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1287 else
1288 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1290 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1291 d = real_value_truncate (mode, d);
1292 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1294 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1295 && (GET_CODE (op) == CONST_DOUBLE
1296 || CONST_INT_P (op)))
1298 HOST_WIDE_INT hv, lv;
1299 REAL_VALUE_TYPE d;
1301 if (CONST_INT_P (op))
1302 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1303 else
1304 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1306 if (op_mode == VOIDmode)
1308 /* We don't know how to interpret negative-looking numbers in
1309 this case, so don't try to fold those. */
1310 if (hv < 0)
1311 return 0;
1313 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1314 ;
1315 else
1316 hv = 0, lv &= GET_MODE_MASK (op_mode);
1318 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1319 d = real_value_truncate (mode, d);
1320 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1323 if (CONST_INT_P (op)
1324 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1326 HOST_WIDE_INT arg0 = INTVAL (op);
1327 HOST_WIDE_INT val;
1329 switch (code)
1331 case NOT:
1332 val = ~ arg0;
1333 break;
1335 case NEG:
1336 val = - arg0;
1337 break;
1339 case ABS:
1340 val = (arg0 >= 0 ? arg0 : - arg0);
1341 break;
1343 case FFS:
1344 arg0 &= GET_MODE_MASK (mode);
1345 val = ffs_hwi (arg0);
1346 break;
1348 case CLZ:
1349 arg0 &= GET_MODE_MASK (mode);
1350 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1351 ;
1352 else
1353 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1354 break;
1356 case CTZ:
1357 arg0 &= GET_MODE_MASK (mode);
1358 if (arg0 == 0)
1360 /* Even if the value at zero is undefined, we have to come
1361 up with some replacement. Seems good enough. */
1362 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1363 val = GET_MODE_BITSIZE (mode);
1365 else
1366 val = ctz_hwi (arg0);
1367 break;
1369 case POPCOUNT:
1370 arg0 &= GET_MODE_MASK (mode);
1371 val = 0;
1372 while (arg0)
1373 val++, arg0 &= arg0 - 1;
1374 break;
1376 case PARITY:
1377 arg0 &= GET_MODE_MASK (mode);
1378 val = 0;
1379 while (arg0)
1380 val++, arg0 &= arg0 - 1;
1381 val &= 1;
1382 break;
1384 case BSWAP:
1386 unsigned int s;
1388 val = 0;
1389 for (s = 0; s < width; s += 8)
1391 unsigned int d = width - s - 8;
1392 unsigned HOST_WIDE_INT byte;
1393 byte = (arg0 >> s) & 0xff;
1394 val |= byte << d;
1397 break;
1399 case TRUNCATE:
1400 val = arg0;
1401 break;
1403 case ZERO_EXTEND:
1404 /* When zero-extending a CONST_INT, we need to know its
1405 original mode. */
1406 gcc_assert (op_mode != VOIDmode);
1407 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1409 /* If we were really extending the mode,
1410 we would have to distinguish between zero-extension
1411 and sign-extension. */
1412 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1413 val = arg0;
1415 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1416 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1417 << GET_MODE_BITSIZE (op_mode));
1418 else
1419 return 0;
1420 break;
1422 case SIGN_EXTEND:
1423 if (op_mode == VOIDmode)
1424 op_mode = mode;
1425 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1427 /* If we were really extending the mode,
1428 we would have to distinguish between zero-extension
1429 and sign-extension. */
1430 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1431 val = arg0;
1433 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1435 val
1436 = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1437 << GET_MODE_BITSIZE (op_mode));
1438 if (val & ((unsigned HOST_WIDE_INT) 1
1439 << (GET_MODE_BITSIZE (op_mode) - 1)))
1440 val
1441 -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1443 else
1444 return 0;
1445 break;
1447 case SQRT:
1448 case FLOAT_EXTEND:
1449 case FLOAT_TRUNCATE:
1450 case SS_TRUNCATE:
1451 case US_TRUNCATE:
1452 case SS_NEG:
1453 case US_NEG:
1454 case SS_ABS:
1455 return 0;
1457 default:
1458 gcc_unreachable ();
1461 return gen_int_mode (val, mode);
1464 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1465 for a DImode operation on a CONST_INT. */
1466 else if (GET_MODE (op) == VOIDmode
1467 && width <= HOST_BITS_PER_WIDE_INT * 2
1468 && (GET_CODE (op) == CONST_DOUBLE
1469 || CONST_INT_P (op)))
1471 unsigned HOST_WIDE_INT l1, lv;
1472 HOST_WIDE_INT h1, hv;
1474 if (GET_CODE (op) == CONST_DOUBLE)
1475 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1476 else
1477 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1479 switch (code)
1481 case NOT:
1482 lv = ~ l1;
1483 hv = ~ h1;
1484 break;
1486 case NEG:
1487 neg_double (l1, h1, &lv, &hv);
1488 break;
1490 case ABS:
1491 if (h1 < 0)
1492 neg_double (l1, h1, &lv, &hv);
1493 else
1494 lv = l1, hv = h1;
1495 break;
1497 case FFS:
1498 hv = 0;
1499 if (l1 != 0)
1500 lv = ffs_hwi (l1);
1501 else if (h1 != 0)
1502 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1503 else
1504 lv = 0;
1505 break;
1507 case CLZ:
1508 hv = 0;
1509 if (h1 != 0)
1510 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1511 - HOST_BITS_PER_WIDE_INT;
1512 else if (l1 != 0)
1513 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1514 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1515 lv = GET_MODE_BITSIZE (mode);
1516 break;
1518 case CTZ:
1519 hv = 0;
1520 if (l1 != 0)
1521 lv = ctz_hwi (l1);
1522 else if (h1 != 0)
1523 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1524 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1525 lv = GET_MODE_BITSIZE (mode);
1526 break;
1528 case POPCOUNT:
1529 hv = 0;
1530 lv = 0;
1531 while (l1)
1532 lv++, l1 &= l1 - 1;
1533 while (h1)
1534 lv++, h1 &= h1 - 1;
1535 break;
1537 case PARITY:
1538 hv = 0;
1539 lv = 0;
1540 while (l1)
1541 lv++, l1 &= l1 - 1;
1542 while (h1)
1543 lv++, h1 &= h1 - 1;
1544 lv &= 1;
1545 break;
1547 case BSWAP:
1549 unsigned int s;
1551 hv = 0;
1552 lv = 0;
1553 for (s = 0; s < width; s += 8)
1555 unsigned int d = width - s - 8;
1556 unsigned HOST_WIDE_INT byte;
1558 if (s < HOST_BITS_PER_WIDE_INT)
1559 byte = (l1 >> s) & 0xff;
1560 else
1561 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1563 if (d < HOST_BITS_PER_WIDE_INT)
1564 lv |= byte << d;
1565 else
1566 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1569 break;
1571 case TRUNCATE:
1572 /* This is just a change-of-mode, so do nothing. */
1573 lv = l1, hv = h1;
1574 break;
1576 case ZERO_EXTEND:
1577 gcc_assert (op_mode != VOIDmode);
1579 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1580 return 0;
1582 hv = 0;
1583 lv = l1 & GET_MODE_MASK (op_mode);
1584 break;
1586 case SIGN_EXTEND:
1587 if (op_mode == VOIDmode
1588 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1589 return 0;
1590 else
1592 lv = l1 & GET_MODE_MASK (op_mode);
1593 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1594 && (lv & ((unsigned HOST_WIDE_INT) 1
1595 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1596 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1598 hv = HWI_SIGN_EXTEND (lv);
1600 break;
1602 case SQRT:
1603 return 0;
1605 default:
1606 return 0;
1609 return immed_double_const (lv, hv, mode);
1612 else if (GET_CODE (op) == CONST_DOUBLE
1613 && SCALAR_FLOAT_MODE_P (mode)
1614 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1616 REAL_VALUE_TYPE d, t;
1617 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1619 switch (code)
1621 case SQRT:
1622 if (HONOR_SNANS (mode) && real_isnan (&d))
1623 return 0;
1624 real_sqrt (&t, mode, &d);
1625 d = t;
1626 break;
1627 case ABS:
1628 d = real_value_abs (&d);
1629 break;
1630 case NEG:
1631 d = real_value_negate (&d);
1632 break;
1633 case FLOAT_TRUNCATE:
1634 d = real_value_truncate (mode, d);
1635 break;
1636 case FLOAT_EXTEND:
1637 /* All this does is change the mode, unless changing
1638 mode class. */
1639 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1640 real_convert (&d, mode, &d);
1641 break;
1642 case FIX:
1643 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1644 break;
1645 case NOT:
1647 long tmp[4];
1648 int i;
1650 real_to_target (tmp, &d, GET_MODE (op));
1651 for (i = 0; i < 4; i++)
1652 tmp[i] = ~tmp[i];
1653 real_from_target (&d, tmp, mode);
1654 break;
1656 default:
1657 gcc_unreachable ();
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1662 else if (GET_CODE (op) == CONST_DOUBLE
1663 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1664 && GET_MODE_CLASS (mode) == MODE_INT
1665 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1667 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1668 operators are intentionally left unspecified (to ease implementation
1669 by target backends), for consistency, this routine implements the
1670 same semantics for constant folding as used by the middle-end. */
1672 /* This was formerly used only for non-IEEE float.
1673 eggert@twinsun.com says it is safe for IEEE also. */
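/* As a worked example of the clamping below: folding (fix:SI X) for
   X == 3.5e9 exceeds the signed upper bound 0x7fffffff, so the result is
   2147483647; (unsigned_fix:SI X) for X == -1.0 yields 0 because negative
   values are clamped to zero, and NaN inputs also fold to 0. */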
1674 HOST_WIDE_INT xh, xl, th, tl;
1675 REAL_VALUE_TYPE x, t;
1676 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1677 switch (code)
1679 case FIX:
1680 if (REAL_VALUE_ISNAN (x))
1681 return const0_rtx;
1683 /* Test against the signed upper bound. */
1684 if (width > HOST_BITS_PER_WIDE_INT)
1686 th = ((unsigned HOST_WIDE_INT) 1
1687 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1688 tl = -1;
1690 else
1692 th = 0;
1693 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1695 real_from_integer (&t, VOIDmode, tl, th, 0);
1696 if (REAL_VALUES_LESS (t, x))
1698 xh = th;
1699 xl = tl;
1700 break;
1703 /* Test against the signed lower bound. */
1704 if (width > HOST_BITS_PER_WIDE_INT)
1706 th = (unsigned HOST_WIDE_INT) (-1)
1707 << (width - HOST_BITS_PER_WIDE_INT - 1);
1708 tl = 0;
1710 else
1712 th = -1;
1713 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1715 real_from_integer (&t, VOIDmode, tl, th, 0);
1716 if (REAL_VALUES_LESS (x, t))
1718 xh = th;
1719 xl = tl;
1720 break;
1722 REAL_VALUE_TO_INT (&xl, &xh, x);
1723 break;
1725 case UNSIGNED_FIX:
1726 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1727 return const0_rtx;
1729 /* Test against the unsigned upper bound. */
1730 if (width == 2*HOST_BITS_PER_WIDE_INT)
1732 th = -1;
1733 tl = -1;
1735 else if (width >= HOST_BITS_PER_WIDE_INT)
1737 th = ((unsigned HOST_WIDE_INT) 1
1738 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1739 tl = -1;
1741 else
1743 th = 0;
1744 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1746 real_from_integer (&t, VOIDmode, tl, th, 1);
1747 if (REAL_VALUES_LESS (t, x))
1749 xh = th;
1750 xl = tl;
1751 break;
1754 REAL_VALUE_TO_INT (&xl, &xh, x);
1755 break;
1757 default:
1758 gcc_unreachable ();
1760 return immed_double_const (xl, xh, mode);
1763 return NULL_RTX;
1766 /* Subroutine of simplify_binary_operation to simplify a commutative,
1767 associative binary operation CODE with result mode MODE, operating
1768 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1769 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1770 canonicalization is possible. */
1772 static rtx
1773 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1774 rtx op0, rtx op1)
1776 rtx tem;
1778 /* Linearize the operator to the left. */
1779 if (GET_CODE (op1) == code)
1781 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1782 if (GET_CODE (op0) == code)
1784 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1785 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1788 /* "a op (b op c)" becomes "(b op c) op a". */
1789 if (! swap_commutative_operands_p (op1, op0))
1790 return simplify_gen_binary (code, mode, op1, op0);
1792 tem = op0;
1793 op0 = op1;
1794 op1 = tem;
1797 if (GET_CODE (op0) == code)
1799 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1800 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1802 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1803 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1806 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1807 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1808 if (tem != 0)
1809 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1811 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1812 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1813 if (tem != 0)
1814 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1817 return 0;
1821 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1822 and OP1. Return 0 if no simplification is possible.
1824 Don't use this for relational operations such as EQ or LT.
1825 Use simplify_relational_operation instead. */
1827 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1828 rtx op0, rtx op1)
1830 rtx trueop0, trueop1;
1831 rtx tem;
1833 /* Relational operations don't work here. We must know the mode
1834 of the operands in order to do the comparison correctly.
1835 Assuming a full word can give incorrect results.
1836 Consider comparing 128 with -128 in QImode. */
1837 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1838 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1840 /* Make sure the constant is second. */
1841 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1842 && swap_commutative_operands_p (op0, op1))
1844 tem = op0, op0 = op1, op1 = tem;
1847 trueop0 = avoid_constant_pool_reference (op0);
1848 trueop1 = avoid_constant_pool_reference (op1);
1850 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1851 if (tem)
1852 return tem;
1853 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1856 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1857 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1858 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1859 actual constants. */
1861 static rtx
1862 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1863 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1865 rtx tem, reversed, opleft, opright;
1866 HOST_WIDE_INT val;
1867 unsigned int width = GET_MODE_BITSIZE (mode);
1869 /* Even if we can't compute a constant result,
1870 there are some cases worth simplifying. */
1872 switch (code)
1874 case PLUS:
1875 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1876 when x is NaN, infinite, or finite and nonzero. They aren't
1877 when x is -0 and the rounding mode is not towards -infinity,
1878 since (-0) + 0 is then 0. */
1879 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1880 return op0;
1882 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1883 transformations are safe even for IEEE. */
1884 if (GET_CODE (op0) == NEG)
1885 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1886 else if (GET_CODE (op1) == NEG)
1887 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1889 /* (~a) + 1 -> -a */
1890 if (INTEGRAL_MODE_P (mode)
1891 && GET_CODE (op0) == NOT
1892 && trueop1 == const1_rtx)
1893 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1895 /* Handle both-operands-constant cases. We can only add
1896 CONST_INTs to constants since the sum of relocatable symbols
1897 can't be handled by most assemblers. Don't add CONST_INT
1898 to CONST_INT since overflow won't be computed properly if wider
1899 than HOST_BITS_PER_WIDE_INT. */
1901 if ((GET_CODE (op0) == CONST
1902 || GET_CODE (op0) == SYMBOL_REF
1903 || GET_CODE (op0) == LABEL_REF)
1904 && CONST_INT_P (op1))
1905 return plus_constant (op0, INTVAL (op1));
1906 else if ((GET_CODE (op1) == CONST
1907 || GET_CODE (op1) == SYMBOL_REF
1908 || GET_CODE (op1) == LABEL_REF)
1909 && CONST_INT_P (op0))
1910 return plus_constant (op1, INTVAL (op0));
1912 /* See if this is something like X * C - X or vice versa or
1913 if the multiplication is written as a shift. If so, we can
1914 distribute and make a new multiply, shift, or maybe just
1915 have X (if C is 2 in the example above). But don't make
1916 something more expensive than we had before. */
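/* For example, (plus (mult x 3) x) has coefficients 3 and 1 and becomes
   (mult x 4); (plus (ashift x 2) x) likewise becomes (mult x 5). Either
   rewrite is kept only if the rtx_cost check below finds it no more
   expensive than the original. */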
1918 if (SCALAR_INT_MODE_P (mode))
1920 double_int coeff0, coeff1;
1921 rtx lhs = op0, rhs = op1;
1923 coeff0 = double_int_one;
1924 coeff1 = double_int_one;
1926 if (GET_CODE (lhs) == NEG)
1928 coeff0 = double_int_minus_one;
1929 lhs = XEXP (lhs, 0);
1931 else if (GET_CODE (lhs) == MULT
1932 && CONST_INT_P (XEXP (lhs, 1)))
1934 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1935 lhs = XEXP (lhs, 0);
1937 else if (GET_CODE (lhs) == ASHIFT
1938 && CONST_INT_P (XEXP (lhs, 1))
1939 && INTVAL (XEXP (lhs, 1)) >= 0
1940 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1942 coeff0 = double_int_setbit (double_int_zero,
1943 INTVAL (XEXP (lhs, 1)));
1944 lhs = XEXP (lhs, 0);
1947 if (GET_CODE (rhs) == NEG)
1949 coeff1 = double_int_minus_one;
1950 rhs = XEXP (rhs, 0);
1952 else if (GET_CODE (rhs) == MULT
1953 && CONST_INT_P (XEXP (rhs, 1)))
1955 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1956 rhs = XEXP (rhs, 0);
1958 else if (GET_CODE (rhs) == ASHIFT
1959 && CONST_INT_P (XEXP (rhs, 1))
1960 && INTVAL (XEXP (rhs, 1)) >= 0
1961 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1963 coeff1 = double_int_setbit (double_int_zero,
1964 INTVAL (XEXP (rhs, 1)));
1965 rhs = XEXP (rhs, 0);
1968 if (rtx_equal_p (lhs, rhs))
1970 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1971 rtx coeff;
1972 double_int val;
1973 bool speed = optimize_function_for_speed_p (cfun);
1975 val = double_int_add (coeff0, coeff1);
1976 coeff = immed_double_int_const (val, mode);
1978 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1979 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1980 ? tem : 0;
1984 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1985 if ((CONST_INT_P (op1)
1986 || GET_CODE (op1) == CONST_DOUBLE)
1987 && GET_CODE (op0) == XOR
1988 && (CONST_INT_P (XEXP (op0, 1))
1989 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1990 && mode_signbit_p (mode, op1))
1991 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1992 simplify_gen_binary (XOR, mode, op1,
1993 XEXP (op0, 1)));
1995 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1996 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1997 && GET_CODE (op0) == MULT
1998 && GET_CODE (XEXP (op0, 0)) == NEG)
2000 rtx in1, in2;
2002 in1 = XEXP (XEXP (op0, 0), 0);
2003 in2 = XEXP (op0, 1);
2004 return simplify_gen_binary (MINUS, mode, op1,
2005 simplify_gen_binary (MULT, mode,
2006 in1, in2));
2009 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2010 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2011 is 1. */
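/* E.g. with STORE_FLAG_VALUE == 1, (plus (eq a b) (const_int -1))
   can become (neg (ne a b)).  */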
2012 if (COMPARISON_P (op0)
2013 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2014 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2015 && (reversed = reversed_comparison (op0, mode)))
2016 return
2017 simplify_gen_unary (NEG, mode, reversed, mode);
2019 /* If one of the operands is a PLUS or a MINUS, see if we can
2020 simplify this by the associative law.
2021 Don't use the associative law for floating point.
2022 The inaccuracy makes it nonassociative,
2023 and subtle programs can break if operations are associated. */
2025 if (INTEGRAL_MODE_P (mode)
2026 && (plus_minus_operand_p (op0)
2027 || plus_minus_operand_p (op1))
2028 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2029 return tem;
2031 /* Reassociate floating point addition only when the user
2032 specifies associative math operations. */
2033 if (FLOAT_MODE_P (mode)
2034 && flag_associative_math)
2036 tem = simplify_associative_operation (code, mode, op0, op1);
2037 if (tem)
2038 return tem;
2040 break;
2042 case COMPARE:
2043 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2044 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2045 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2046 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2048 rtx xop00 = XEXP (op0, 0);
2049 rtx xop10 = XEXP (op1, 0);
2051 #ifdef HAVE_cc0
2052 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2053 #else
2054 if (REG_P (xop00) && REG_P (xop10)
2055 && GET_MODE (xop00) == GET_MODE (xop10)
2056 && REGNO (xop00) == REGNO (xop10)
2057 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2058 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2059 #endif
2060 return xop00;
2062 break;
2064 case MINUS:
2065 /* We can't assume x-x is 0 even with non-IEEE floating point,
2066 but since it is zero except in very strange circumstances, we
2067 will treat it as zero with -ffinite-math-only. */
2068 if (rtx_equal_p (trueop0, trueop1)
2069 && ! side_effects_p (op0)
2070 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2071 return CONST0_RTX (mode);
2073 /* Change subtraction from zero into negation. (0 - x) is the
2074 same as -x when x is NaN, infinite, or finite and nonzero.
2075 But if the mode has signed zeros, and does not round towards
2076 -infinity, then 0 - 0 is 0, not -0. */
2077 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2078 return simplify_gen_unary (NEG, mode, op1, mode);
2080 /* (-1 - a) is ~a. */
2081 if (trueop0 == constm1_rtx)
2082 return simplify_gen_unary (NOT, mode, op1, mode);
2084 /* Subtracting 0 has no effect unless the mode has signed zeros
2085 and supports rounding towards -infinity. In such a case,
2086 0 - 0 is -0. */
2087 if (!(HONOR_SIGNED_ZEROS (mode)
2088 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2089 && trueop1 == CONST0_RTX (mode))
2090 return op0;
2092 /* See if this is something like X * C - X or vice versa or
2093 if the multiplication is written as a shift. If so, we can
2094 distribute and make a new multiply, shift, or maybe just
2095 have X (if C is 2 in the example above). But don't make
2096 something more expensive than we had before. */
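/* E.g. (minus (mult x 3) x) can become (mult x 2), and
   (minus x (ashift x 2)) can become (mult x -3).  */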
2098 if (SCALAR_INT_MODE_P (mode))
2100 double_int coeff0, negcoeff1;
2101 rtx lhs = op0, rhs = op1;
2103 coeff0 = double_int_one;
2104 negcoeff1 = double_int_minus_one;
2106 if (GET_CODE (lhs) == NEG)
2108 coeff0 = double_int_minus_one;
2109 lhs = XEXP (lhs, 0);
2111 else if (GET_CODE (lhs) == MULT
2112 && CONST_INT_P (XEXP (lhs, 1)))
2114 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2115 lhs = XEXP (lhs, 0);
2117 else if (GET_CODE (lhs) == ASHIFT
2118 && CONST_INT_P (XEXP (lhs, 1))
2119 && INTVAL (XEXP (lhs, 1)) >= 0
2120 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2122 coeff0 = double_int_setbit (double_int_zero,
2123 INTVAL (XEXP (lhs, 1)));
2124 lhs = XEXP (lhs, 0);
2127 if (GET_CODE (rhs) == NEG)
2129 negcoeff1 = double_int_one;
2130 rhs = XEXP (rhs, 0);
2132 else if (GET_CODE (rhs) == MULT
2133 && CONST_INT_P (XEXP (rhs, 1)))
2135 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2136 rhs = XEXP (rhs, 0);
2138 else if (GET_CODE (rhs) == ASHIFT
2139 && CONST_INT_P (XEXP (rhs, 1))
2140 && INTVAL (XEXP (rhs, 1)) >= 0
2141 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2143 negcoeff1 = double_int_setbit (double_int_zero,
2144 INTVAL (XEXP (rhs, 1)));
2145 negcoeff1 = double_int_neg (negcoeff1);
2146 rhs = XEXP (rhs, 0);
2149 if (rtx_equal_p (lhs, rhs))
2151 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2152 rtx coeff;
2153 double_int val;
2154 bool speed = optimize_function_for_speed_p (cfun);
2156 val = double_int_add (coeff0, negcoeff1);
2157 coeff = immed_double_int_const (val, mode);
2159 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2160 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2161 ? tem : 0;
2165 /* (a - (-b)) -> (a + b). True even for IEEE. */
2166 if (GET_CODE (op1) == NEG)
2167 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2169 /* (-x - c) may be simplified as (-c - x). */
2170 if (GET_CODE (op0) == NEG
2171 && (CONST_INT_P (op1)
2172 || GET_CODE (op1) == CONST_DOUBLE))
2174 tem = simplify_unary_operation (NEG, mode, op1, mode);
2175 if (tem)
2176 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2179 /* Don't let a relocatable value get a negative coeff. */
2180 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2181 return simplify_gen_binary (PLUS, mode,
2182 op0,
2183 neg_const_int (mode, op1));
2185 /* (x - (x & y)) -> (x & ~y) */
2186 if (GET_CODE (op1) == AND)
2188 if (rtx_equal_p (op0, XEXP (op1, 0)))
2190 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2191 GET_MODE (XEXP (op1, 1)));
2192 return simplify_gen_binary (AND, mode, op0, tem);
2194 if (rtx_equal_p (op0, XEXP (op1, 1)))
2196 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2197 GET_MODE (XEXP (op1, 0)));
2198 return simplify_gen_binary (AND, mode, op0, tem);
2202 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2203 by reversing the comparison code if valid. */
2204 if (STORE_FLAG_VALUE == 1
2205 && trueop0 == const1_rtx
2206 && COMPARISON_P (op1)
2207 && (reversed = reversed_comparison (op1, mode)))
2208 return reversed;
2210 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2211 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2212 && GET_CODE (op1) == MULT
2213 && GET_CODE (XEXP (op1, 0)) == NEG)
2215 rtx in1, in2;
2217 in1 = XEXP (XEXP (op1, 0), 0);
2218 in2 = XEXP (op1, 1);
2219 return simplify_gen_binary (PLUS, mode,
2220 simplify_gen_binary (MULT, mode,
2221 in1, in2),
2222 op0);
2225 /* Canonicalize (minus (neg A) (mult B C)) to
2226 (minus (mult (neg B) C) A). */
2227 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2228 && GET_CODE (op1) == MULT
2229 && GET_CODE (op0) == NEG)
2231 rtx in1, in2;
2233 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2234 in2 = XEXP (op1, 1);
2235 return simplify_gen_binary (MINUS, mode,
2236 simplify_gen_binary (MULT, mode,
2237 in1, in2),
2238 XEXP (op0, 0));
2241 /* If one of the operands is a PLUS or a MINUS, see if we can
2242 simplify this by the associative law. This will, for example,
2243 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2244 Don't use the associative law for floating point.
2245 The inaccuracy makes it nonassociative,
2246 and subtle programs can break if operations are associated. */
2248 if (INTEGRAL_MODE_P (mode)
2249 && (plus_minus_operand_p (op0)
2250 || plus_minus_operand_p (op1))
2251 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2252 return tem;
2253 break;
2255 case MULT:
2256 if (trueop1 == constm1_rtx)
2257 return simplify_gen_unary (NEG, mode, op0, mode);
2259 if (GET_CODE (op0) == NEG)
2261 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2262 if (temp)
2263 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2265 if (GET_CODE (op1) == NEG)
2267 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2268 if (temp)
2269 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2272 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2273 x is NaN, since x * 0 is then also NaN. Nor is it valid
2274 when the mode has signed zeros, since multiplying a negative
2275 number by 0 will give -0, not 0. */
2276 if (!HONOR_NANS (mode)
2277 && !HONOR_SIGNED_ZEROS (mode)
2278 && trueop1 == CONST0_RTX (mode)
2279 && ! side_effects_p (op0))
2280 return op1;
2282 /* In IEEE floating point, x*1 is not equivalent to x for
2283 signalling NaNs. */
2284 if (!HONOR_SNANS (mode)
2285 && trueop1 == CONST1_RTX (mode))
2286 return op0;
2288 /* Convert multiply by constant power of two into shift unless
2289 we are still generating RTL. This test is a kludge. */
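/* E.g. (mult x (const_int 8)) can become (ashift x (const_int 3)).  */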
2290 if (CONST_INT_P (trueop1)
2291 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2292 /* If the mode is larger than the host word size, and the
2293 uppermost bit is set, then this isn't a power of two due
2294 to implicit sign extension. */
2295 && (width <= HOST_BITS_PER_WIDE_INT
2296 || val != HOST_BITS_PER_WIDE_INT - 1))
2297 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2299 /* Likewise for multipliers wider than a word. */
2300 if (GET_CODE (trueop1) == CONST_DOUBLE
2301 && (GET_MODE (trueop1) == VOIDmode
2302 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2303 && GET_MODE (op0) == mode
2304 && CONST_DOUBLE_LOW (trueop1) == 0
2305 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2306 return simplify_gen_binary (ASHIFT, mode, op0,
2307 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2309 /* x*2 is x+x and x*(-1) is -x */
2310 if (GET_CODE (trueop1) == CONST_DOUBLE
2311 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2312 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2313 && GET_MODE (op0) == mode)
2315 REAL_VALUE_TYPE d;
2316 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2318 if (REAL_VALUES_EQUAL (d, dconst2))
2319 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2321 if (!HONOR_SNANS (mode)
2322 && REAL_VALUES_EQUAL (d, dconstm1))
2323 return simplify_gen_unary (NEG, mode, op0, mode);
2326 /* Optimize -x * -x as x * x. */
2327 if (FLOAT_MODE_P (mode)
2328 && GET_CODE (op0) == NEG
2329 && GET_CODE (op1) == NEG
2330 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2331 && !side_effects_p (XEXP (op0, 0)))
2332 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2334 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2335 if (SCALAR_FLOAT_MODE_P (mode)
2336 && GET_CODE (op0) == ABS
2337 && GET_CODE (op1) == ABS
2338 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2339 && !side_effects_p (XEXP (op0, 0)))
2340 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2342 /* Reassociate multiplication, but for floating point MULTs
2343 only when the user specifies unsafe math optimizations. */
2344 if (! FLOAT_MODE_P (mode)
2345 || flag_unsafe_math_optimizations)
2347 tem = simplify_associative_operation (code, mode, op0, op1);
2348 if (tem)
2349 return tem;
2351 break;
2353 case IOR:
2354 if (trueop1 == CONST0_RTX (mode))
2355 return op0;
2356 if (CONST_INT_P (trueop1)
2357 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2358 == GET_MODE_MASK (mode)))
2359 return op1;
2360 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2361 return op0;
2362 /* A | (~A) -> -1 */
2363 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2364 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2365 && ! side_effects_p (op0)
2366 && SCALAR_INT_MODE_P (mode))
2367 return constm1_rtx;
2369 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2370 if (CONST_INT_P (op1)
2371 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2372 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2373 return op1;
2375 /* Canonicalize (X & C1) | C2. */
2376 if (GET_CODE (op0) == AND
2377 && CONST_INT_P (trueop1)
2378 && CONST_INT_P (XEXP (op0, 1)))
2380 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2381 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2382 HOST_WIDE_INT c2 = INTVAL (trueop1);
2384 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2385 if ((c1 & c2) == c1
2386 && !side_effects_p (XEXP (op0, 0)))
2387 return trueop1;
2389 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2390 if (((c1|c2) & mask) == mask)
2391 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2393 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2394 if (((c1 & ~c2) & mask) != (c1 & mask))
2396 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2397 gen_int_mode (c1 & ~c2, mode));
2398 return simplify_gen_binary (IOR, mode, tem, op1);
2402 /* Convert (A & B) | A to A. */
2403 if (GET_CODE (op0) == AND
2404 && (rtx_equal_p (XEXP (op0, 0), op1)
2405 || rtx_equal_p (XEXP (op0, 1), op1))
2406 && ! side_effects_p (XEXP (op0, 0))
2407 && ! side_effects_p (XEXP (op0, 1)))
2408 return op1;
2410 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2411 mode size to (rotate A CX). */
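/* E.g. in SImode, (ior (ashift a (const_int 8)) (lshiftrt a (const_int 24)))
   can become (rotate a (const_int 8)).  */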
2413 if (GET_CODE (op1) == ASHIFT
2414 || GET_CODE (op1) == SUBREG)
2416 opleft = op1;
2417 opright = op0;
2419 else
2421 opright = op1;
2422 opleft = op0;
2425 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2426 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2427 && CONST_INT_P (XEXP (opleft, 1))
2428 && CONST_INT_P (XEXP (opright, 1))
2429 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2430 == GET_MODE_BITSIZE (mode)))
2431 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2433 /* Same, but for ashift that has been "simplified" to a wider mode
2434 by simplify_shift_const. */
2436 if (GET_CODE (opleft) == SUBREG
2437 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2438 && GET_CODE (opright) == LSHIFTRT
2439 && GET_CODE (XEXP (opright, 0)) == SUBREG
2440 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2441 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2442 && (GET_MODE_SIZE (GET_MODE (opleft))
2443 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2444 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2445 SUBREG_REG (XEXP (opright, 0)))
2446 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2447 && CONST_INT_P (XEXP (opright, 1))
2448 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2449 == GET_MODE_BITSIZE (mode)))
2450 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2451 XEXP (SUBREG_REG (opleft), 1));
2453 /* If we have (ior (and X C1) C2), simplify this by making
2454 C1 as small as possible if C1 actually changes. */
2455 if (CONST_INT_P (op1)
2456 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2457 || INTVAL (op1) > 0)
2458 && GET_CODE (op0) == AND
2459 && CONST_INT_P (XEXP (op0, 1))
2460 && CONST_INT_P (op1)
2461 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2462 return simplify_gen_binary (IOR, mode,
2463 simplify_gen_binary
2464 (AND, mode, XEXP (op0, 0),
2465 GEN_INT (UINTVAL (XEXP (op0, 1))
2466 & ~UINTVAL (op1))),
2467 op1);
2469 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2470 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2471 PLUS does not affect any of the bits in OP1, we can do the
2472 IOR as a PLUS and then associate. This is valid if OP1
2473 can be safely shifted left C bits. */
2474 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2475 && GET_CODE (XEXP (op0, 0)) == PLUS
2476 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2477 && CONST_INT_P (XEXP (op0, 1))
2478 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2480 int count = INTVAL (XEXP (op0, 1));
2481 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2483 if (mask >> count == INTVAL (trueop1)
2484 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2485 return simplify_gen_binary (ASHIFTRT, mode,
2486 plus_constant (XEXP (op0, 0), mask),
2487 XEXP (op0, 1));
2490 tem = simplify_associative_operation (code, mode, op0, op1);
2491 if (tem)
2492 return tem;
2493 break;
2495 case XOR:
2496 if (trueop1 == CONST0_RTX (mode))
2497 return op0;
2498 if (CONST_INT_P (trueop1)
2499 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2500 == GET_MODE_MASK (mode)))
2501 return simplify_gen_unary (NOT, mode, op0, mode);
2502 if (rtx_equal_p (trueop0, trueop1)
2503 && ! side_effects_p (op0)
2504 && GET_MODE_CLASS (mode) != MODE_CC)
2505 return CONST0_RTX (mode);
2507 /* Canonicalize XOR of the most significant bit to PLUS. */
2508 if ((CONST_INT_P (op1)
2509 || GET_CODE (op1) == CONST_DOUBLE)
2510 && mode_signbit_p (mode, op1))
2511 return simplify_gen_binary (PLUS, mode, op0, op1);
2512 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2513 if ((CONST_INT_P (op1)
2514 || GET_CODE (op1) == CONST_DOUBLE)
2515 && GET_CODE (op0) == PLUS
2516 && (CONST_INT_P (XEXP (op0, 1))
2517 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2518 && mode_signbit_p (mode, XEXP (op0, 1)))
2519 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2520 simplify_gen_binary (XOR, mode, op1,
2521 XEXP (op0, 1)));
2523 /* If we are XORing two things that have no bits in common,
2524 convert them into an IOR. This helps to detect rotation encoded
2525 using those methods and possibly other simplifications. */
2527 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2528 && (nonzero_bits (op0, mode)
2529 & nonzero_bits (op1, mode)) == 0)
2530 return (simplify_gen_binary (IOR, mode, op0, op1));
2532 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2533 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2534 (NOT y). */
2536 int num_negated = 0;
2538 if (GET_CODE (op0) == NOT)
2539 num_negated++, op0 = XEXP (op0, 0);
2540 if (GET_CODE (op1) == NOT)
2541 num_negated++, op1 = XEXP (op1, 0);
2543 if (num_negated == 2)
2544 return simplify_gen_binary (XOR, mode, op0, op1);
2545 else if (num_negated == 1)
2546 return simplify_gen_unary (NOT, mode,
2547 simplify_gen_binary (XOR, mode, op0, op1),
2548 mode);
2551 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2552 correspond to a machine insn or result in further simplifications
2553 if B is a constant. */
2555 if (GET_CODE (op0) == AND
2556 && rtx_equal_p (XEXP (op0, 1), op1)
2557 && ! side_effects_p (op1))
2558 return simplify_gen_binary (AND, mode,
2559 simplify_gen_unary (NOT, mode,
2560 XEXP (op0, 0), mode),
2561 op1);
2563 else if (GET_CODE (op0) == AND
2564 && rtx_equal_p (XEXP (op0, 0), op1)
2565 && ! side_effects_p (op1))
2566 return simplify_gen_binary (AND, mode,
2567 simplify_gen_unary (NOT, mode,
2568 XEXP (op0, 1), mode),
2569 op1);
2571 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2572 we can transform like this:
2573 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2574 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2575 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2576 Attempt a few simplifications when B and C are both constants. */
2577 if (GET_CODE (op0) == AND
2578 && CONST_INT_P (op1)
2579 && CONST_INT_P (XEXP (op0, 1)))
2581 rtx a = XEXP (op0, 0);
2582 rtx b = XEXP (op0, 1);
2583 rtx c = op1;
2584 HOST_WIDE_INT bval = INTVAL (b);
2585 HOST_WIDE_INT cval = INTVAL (c);
2587 rtx na_c
2588 = simplify_binary_operation (AND, mode,
2589 simplify_gen_unary (NOT, mode, a, mode),
2590 c);
2591 if ((~cval & bval) == 0)
2593 /* Try to simplify ~A&C | ~B&C. */
2594 if (na_c != NULL_RTX)
2595 return simplify_gen_binary (IOR, mode, na_c,
2596 GEN_INT (~bval & cval));
2598 else
2600 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2601 if (na_c == const0_rtx)
2603 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2604 GEN_INT (~cval & bval));
2605 return simplify_gen_binary (IOR, mode, a_nc_b,
2606 GEN_INT (~bval & cval));
2611 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2612 comparison if STORE_FLAG_VALUE is 1. */
2613 if (STORE_FLAG_VALUE == 1
2614 && trueop1 == const1_rtx
2615 && COMPARISON_P (op0)
2616 && (reversed = reversed_comparison (op0, mode)))
2617 return reversed;
2619 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2620 is (lt foo (const_int 0)), so we can perform the above
2621 simplification if STORE_FLAG_VALUE is 1. */
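/* E.g. in SImode, (xor (lshiftrt x (const_int 31)) (const_int 1))
   can become (ge x (const_int 0)).  */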
2623 if (STORE_FLAG_VALUE == 1
2624 && trueop1 == const1_rtx
2625 && GET_CODE (op0) == LSHIFTRT
2626 && CONST_INT_P (XEXP (op0, 1))
2627 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2628 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2630 /* (xor (comparison foo bar) (const_int sign-bit))
2631 when STORE_FLAG_VALUE is the sign bit. */
2632 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2633 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2634 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2635 && trueop1 == const_true_rtx
2636 && COMPARISON_P (op0)
2637 && (reversed = reversed_comparison (op0, mode)))
2638 return reversed;
2640 tem = simplify_associative_operation (code, mode, op0, op1);
2641 if (tem)
2642 return tem;
2643 break;
2645 case AND:
2646 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2647 return trueop1;
2648 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2650 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2651 HOST_WIDE_INT nzop1;
2652 if (CONST_INT_P (trueop1))
2654 HOST_WIDE_INT val1 = INTVAL (trueop1);
2655 /* If we are turning off bits already known off in OP0, we need
2656 not do an AND. */
2657 if ((nzop0 & ~val1) == 0)
2658 return op0;
2660 nzop1 = nonzero_bits (trueop1, mode);
2661 /* If we are clearing all the nonzero bits, the result is zero. */
2662 if ((nzop1 & nzop0) == 0
2663 && !side_effects_p (op0) && !side_effects_p (op1))
2664 return CONST0_RTX (mode);
2666 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2667 && GET_MODE_CLASS (mode) != MODE_CC)
2668 return op0;
2669 /* A & (~A) -> 0 */
2670 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2671 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2672 && ! side_effects_p (op0)
2673 && GET_MODE_CLASS (mode) != MODE_CC)
2674 return CONST0_RTX (mode);
2676 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2677 there are no nonzero bits of C outside of X's mode. */
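/* E.g. (and (sign_extend:SI (reg:QI x)) (const_int 0x7f)) can become
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))).  */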
2678 if ((GET_CODE (op0) == SIGN_EXTEND
2679 || GET_CODE (op0) == ZERO_EXTEND)
2680 && CONST_INT_P (trueop1)
2681 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2682 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2683 & UINTVAL (trueop1)) == 0)
2685 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2686 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2687 gen_int_mode (INTVAL (trueop1),
2688 imode));
2689 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2692 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2693 we might be able to further simplify the AND with X and potentially
2694 remove the truncation altogether. */
2695 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2697 rtx x = XEXP (op0, 0);
2698 enum machine_mode xmode = GET_MODE (x);
2699 tem = simplify_gen_binary (AND, xmode, x,
2700 gen_int_mode (INTVAL (trueop1), xmode));
2701 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2704 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2705 if (GET_CODE (op0) == IOR
2706 && CONST_INT_P (trueop1)
2707 && CONST_INT_P (XEXP (op0, 1)))
2709 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2710 return simplify_gen_binary (IOR, mode,
2711 simplify_gen_binary (AND, mode,
2712 XEXP (op0, 0), op1),
2713 gen_int_mode (tmp, mode));
2716 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2717 insn (and may simplify more). */
2718 if (GET_CODE (op0) == XOR
2719 && rtx_equal_p (XEXP (op0, 0), op1)
2720 && ! side_effects_p (op1))
2721 return simplify_gen_binary (AND, mode,
2722 simplify_gen_unary (NOT, mode,
2723 XEXP (op0, 1), mode),
2724 op1);
2726 if (GET_CODE (op0) == XOR
2727 && rtx_equal_p (XEXP (op0, 1), op1)
2728 && ! side_effects_p (op1))
2729 return simplify_gen_binary (AND, mode,
2730 simplify_gen_unary (NOT, mode,
2731 XEXP (op0, 0), mode),
2732 op1);
2734 /* Similarly for (~(A ^ B)) & A. */
2735 if (GET_CODE (op0) == NOT
2736 && GET_CODE (XEXP (op0, 0)) == XOR
2737 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2738 && ! side_effects_p (op1))
2739 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2741 if (GET_CODE (op0) == NOT
2742 && GET_CODE (XEXP (op0, 0)) == XOR
2743 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2744 && ! side_effects_p (op1))
2745 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2747 /* Convert (A | B) & A to A. */
2748 if (GET_CODE (op0) == IOR
2749 && (rtx_equal_p (XEXP (op0, 0), op1)
2750 || rtx_equal_p (XEXP (op0, 1), op1))
2751 && ! side_effects_p (XEXP (op0, 0))
2752 && ! side_effects_p (XEXP (op0, 1)))
2753 return op1;
2755 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2756 ((A & N) + B) & M -> (A + B) & M
2757 Similarly if (N & M) == 0,
2758 ((A | N) + B) & M -> (A + B) & M
2759 and for - instead of + and/or ^ instead of |.
2760 Also, if (N & M) == 0, then
2761 (A +- N) & M -> A & M. */
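/* E.g. with M == 0xff, ((a & 0x1ff) + b) & 0xff can become (a + b) & 0xff,
   and (a + 0x100) & 0xff can become a & 0xff.  */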
2762 if (CONST_INT_P (trueop1)
2763 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2764 && ~UINTVAL (trueop1)
2765 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2766 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2768 rtx pmop[2];
2769 int which;
2771 pmop[0] = XEXP (op0, 0);
2772 pmop[1] = XEXP (op0, 1);
2774 if (CONST_INT_P (pmop[1])
2775 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2776 return simplify_gen_binary (AND, mode, pmop[0], op1);
2778 for (which = 0; which < 2; which++)
2780 tem = pmop[which];
2781 switch (GET_CODE (tem))
2783 case AND:
2784 if (CONST_INT_P (XEXP (tem, 1))
2785 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2786 == UINTVAL (trueop1))
2787 pmop[which] = XEXP (tem, 0);
2788 break;
2789 case IOR:
2790 case XOR:
2791 if (CONST_INT_P (XEXP (tem, 1))
2792 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2793 pmop[which] = XEXP (tem, 0);
2794 break;
2795 default:
2796 break;
2800 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2802 tem = simplify_gen_binary (GET_CODE (op0), mode,
2803 pmop[0], pmop[1]);
2804 return simplify_gen_binary (code, mode, tem, op1);
2809 /* (and X (ior (not X) Y)) -> (and X Y) */
2809 if (GET_CODE (op1) == IOR
2810 && GET_CODE (XEXP (op1, 0)) == NOT
2811 && op0 == XEXP (XEXP (op1, 0), 0))
2812 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2814 /* (and (ior (not X) Y) X) -> (and X Y) */
2815 if (GET_CODE (op0) == IOR
2816 && GET_CODE (XEXP (op0, 0)) == NOT
2817 && op1 == XEXP (XEXP (op0, 0), 0))
2818 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2820 tem = simplify_associative_operation (code, mode, op0, op1);
2821 if (tem)
2822 return tem;
2823 break;
2825 case UDIV:
2826 /* 0/x is 0 (or x&0 if x has side-effects). */
2827 if (trueop0 == CONST0_RTX (mode))
2829 if (side_effects_p (op1))
2830 return simplify_gen_binary (AND, mode, op1, trueop0);
2831 return trueop0;
2833 /* x/1 is x. */
2834 if (trueop1 == CONST1_RTX (mode))
2835 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2836 /* Convert divide by power of two into shift. */
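/* E.g. (udiv x (const_int 8)) can become (lshiftrt x (const_int 3)).  */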
2837 if (CONST_INT_P (trueop1)
2838 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2839 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2840 break;
2842 case DIV:
2843 /* Handle floating point and integers separately. */
2844 if (SCALAR_FLOAT_MODE_P (mode))
2846 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2847 safe for modes with NaNs, since 0.0 / 0.0 will then be
2848 NaN rather than 0.0. Nor is it safe for modes with signed
2849 zeros, since dividing 0 by a negative number gives -0.0 */
2850 if (trueop0 == CONST0_RTX (mode)
2851 && !HONOR_NANS (mode)
2852 && !HONOR_SIGNED_ZEROS (mode)
2853 && ! side_effects_p (op1))
2854 return op0;
2855 /* x/1.0 is x. */
2856 if (trueop1 == CONST1_RTX (mode)
2857 && !HONOR_SNANS (mode))
2858 return op0;
2860 if (GET_CODE (trueop1) == CONST_DOUBLE
2861 && trueop1 != CONST0_RTX (mode))
2863 REAL_VALUE_TYPE d;
2864 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2866 /* x/-1.0 is -x. */
2867 if (REAL_VALUES_EQUAL (d, dconstm1)
2868 && !HONOR_SNANS (mode))
2869 return simplify_gen_unary (NEG, mode, op0, mode);
2871 /* Change FP division by a constant into multiplication.
2872 Only do this with -freciprocal-math. */
2873 if (flag_reciprocal_math
2874 && !REAL_VALUES_EQUAL (d, dconst0))
2876 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2877 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2878 return simplify_gen_binary (MULT, mode, op0, tem);
2882 else
2884 /* 0/x is 0 (or x&0 if x has side-effects). */
2885 if (trueop0 == CONST0_RTX (mode)
2886 && !cfun->can_throw_non_call_exceptions)
2888 if (side_effects_p (op1))
2889 return simplify_gen_binary (AND, mode, op1, trueop0);
2890 return trueop0;
2892 /* x/1 is x. */
2893 if (trueop1 == CONST1_RTX (mode))
2894 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2895 /* x/-1 is -x. */
2896 if (trueop1 == constm1_rtx)
2898 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2899 return simplify_gen_unary (NEG, mode, x, mode);
2902 break;
2904 case UMOD:
2905 /* 0%x is 0 (or x&0 if x has side-effects). */
2906 if (trueop0 == CONST0_RTX (mode))
2908 if (side_effects_p (op1))
2909 return simplify_gen_binary (AND, mode, op1, trueop0);
2910 return trueop0;
2912 /* x%1 is 0 (or x&0 if x has side-effects). */
2913 if (trueop1 == CONST1_RTX (mode))
2915 if (side_effects_p (op0))
2916 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2917 return CONST0_RTX (mode);
2919 /* Implement modulus by power of two as AND. */
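/* E.g. (umod x (const_int 8)) can become (and x (const_int 7)).  */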
2920 if (CONST_INT_P (trueop1)
2921 && exact_log2 (UINTVAL (trueop1)) > 0)
2922 return simplify_gen_binary (AND, mode, op0,
2923 GEN_INT (INTVAL (op1) - 1));
2924 break;
2926 case MOD:
2927 /* 0%x is 0 (or x&0 if x has side-effects). */
2928 if (trueop0 == CONST0_RTX (mode))
2930 if (side_effects_p (op1))
2931 return simplify_gen_binary (AND, mode, op1, trueop0);
2932 return trueop0;
2934 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2935 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2937 if (side_effects_p (op0))
2938 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2939 return CONST0_RTX (mode);
2941 break;
2943 case ROTATERT:
2944 case ROTATE:
2945 case ASHIFTRT:
2946 if (trueop1 == CONST0_RTX (mode))
2947 return op0;
2948 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2949 return op0;
2950 /* Rotating ~0 always results in ~0. */
2951 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2952 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2953 && ! side_effects_p (op1))
2954 return op0;
2955 canonicalize_shift:
2956 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2958 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2959 if (val != INTVAL (op1))
2960 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2962 break;
2964 case ASHIFT:
2965 case SS_ASHIFT:
2966 case US_ASHIFT:
2967 if (trueop1 == CONST0_RTX (mode))
2968 return op0;
2969 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2970 return op0;
2971 goto canonicalize_shift;
2973 case LSHIFTRT:
2974 if (trueop1 == CONST0_RTX (mode))
2975 return op0;
2976 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2977 return op0;
2978 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2979 if (GET_CODE (op0) == CLZ
2980 && CONST_INT_P (trueop1)
2981 && STORE_FLAG_VALUE == 1
2982 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2984 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2985 unsigned HOST_WIDE_INT zero_val = 0;
2987 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2988 && zero_val == GET_MODE_BITSIZE (imode)
2989 && INTVAL (trueop1) == exact_log2 (zero_val))
2990 return simplify_gen_relational (EQ, mode, imode,
2991 XEXP (op0, 0), const0_rtx);
2993 goto canonicalize_shift;
2995 case SMIN:
2996 if (width <= HOST_BITS_PER_WIDE_INT
2997 && CONST_INT_P (trueop1)
2998 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1)
2999 && ! side_effects_p (op0))
3000 return op1;
3001 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3002 return op0;
3003 tem = simplify_associative_operation (code, mode, op0, op1);
3004 if (tem)
3005 return tem;
3006 break;
3008 case SMAX:
3009 if (width <= HOST_BITS_PER_WIDE_INT
3010 && CONST_INT_P (trueop1)
3011 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3012 && ! side_effects_p (op0))
3013 return op1;
3014 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3015 return op0;
3016 tem = simplify_associative_operation (code, mode, op0, op1);
3017 if (tem)
3018 return tem;
3019 break;
3021 case UMIN:
3022 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3023 return op1;
3024 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3025 return op0;
3026 tem = simplify_associative_operation (code, mode, op0, op1);
3027 if (tem)
3028 return tem;
3029 break;
3031 case UMAX:
3032 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3033 return op1;
3034 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3035 return op0;
3036 tem = simplify_associative_operation (code, mode, op0, op1);
3037 if (tem)
3038 return tem;
3039 break;
3041 case SS_PLUS:
3042 case US_PLUS:
3043 case SS_MINUS:
3044 case US_MINUS:
3045 case SS_MULT:
3046 case US_MULT:
3047 case SS_DIV:
3048 case US_DIV:
3049 /* ??? There are simplifications that can be done. */
3050 return 0;
3052 case VEC_SELECT:
3053 if (!VECTOR_MODE_P (mode))
3055 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3056 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3057 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3058 gcc_assert (XVECLEN (trueop1, 0) == 1);
3059 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3061 if (GET_CODE (trueop0) == CONST_VECTOR)
3062 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3063 (trueop1, 0, 0)));
3065 /* Extract a scalar element from a nested VEC_SELECT expression
3066 (with optional nested VEC_CONCAT expression). Some targets
3067 (i386) extract a scalar element from a vector using a chain of
3068 nested VEC_SELECT expressions. When the input operand is a memory
3069 operand, this operation can be simplified to a simple scalar
3070 load from an offset memory address. */
3071 if (GET_CODE (trueop0) == VEC_SELECT)
3073 rtx op0 = XEXP (trueop0, 0);
3074 rtx op1 = XEXP (trueop0, 1);
3076 enum machine_mode opmode = GET_MODE (op0);
3077 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3078 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3080 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3081 int elem;
3083 rtvec vec;
3084 rtx tmp_op, tmp;
3086 gcc_assert (GET_CODE (op1) == PARALLEL);
3087 gcc_assert (i < n_elts);
3089 /* Select the element pointed to by the nested selector. */
3090 elem = INTVAL (XVECEXP (op1, 0, i));
3092 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3093 if (GET_CODE (op0) == VEC_CONCAT)
3095 rtx op00 = XEXP (op0, 0);
3096 rtx op01 = XEXP (op0, 1);
3098 enum machine_mode mode00, mode01;
3099 int n_elts00, n_elts01;
3101 mode00 = GET_MODE (op00);
3102 mode01 = GET_MODE (op01);
3104 /* Find out number of elements of each operand. */
3105 if (VECTOR_MODE_P (mode00))
3107 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3108 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3110 else
3111 n_elts00 = 1;
3113 if (VECTOR_MODE_P (mode01))
3115 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3116 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3118 else
3119 n_elts01 = 1;
3121 gcc_assert (n_elts == n_elts00 + n_elts01);
3123 /* Select correct operand of VEC_CONCAT
3124 and adjust selector. */
3125 if (elem < n_elts01)
3126 tmp_op = op00;
3127 else
3129 tmp_op = op01;
3130 elem -= n_elts00;
3133 else
3134 tmp_op = op0;
3136 vec = rtvec_alloc (1);
3137 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3139 tmp = gen_rtx_fmt_ee (code, mode,
3140 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3141 return tmp;
3143 if (GET_CODE (trueop0) == VEC_DUPLICATE
3144 && GET_MODE (XEXP (trueop0, 0)) == mode)
3145 return XEXP (trueop0, 0);
3147 else
3149 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3150 gcc_assert (GET_MODE_INNER (mode)
3151 == GET_MODE_INNER (GET_MODE (trueop0)));
3152 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3154 if (GET_CODE (trueop0) == CONST_VECTOR)
3156 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3157 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3158 rtvec v = rtvec_alloc (n_elts);
3159 unsigned int i;
3161 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3162 for (i = 0; i < n_elts; i++)
3164 rtx x = XVECEXP (trueop1, 0, i);
3166 gcc_assert (CONST_INT_P (x));
3167 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3168 INTVAL (x));
3171 return gen_rtx_CONST_VECTOR (mode, v);
3175 if (XVECLEN (trueop1, 0) == 1
3176 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3177 && GET_CODE (trueop0) == VEC_CONCAT)
3179 rtx vec = trueop0;
3180 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3182 /* Try to find the element in the VEC_CONCAT. */
3183 while (GET_MODE (vec) != mode
3184 && GET_CODE (vec) == VEC_CONCAT)
3186 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3187 if (offset < vec_size)
3188 vec = XEXP (vec, 0);
3189 else
3191 offset -= vec_size;
3192 vec = XEXP (vec, 1);
3194 vec = avoid_constant_pool_reference (vec);
3197 if (GET_MODE (vec) == mode)
3198 return vec;
3201 return 0;
3202 case VEC_CONCAT:
3204 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3205 ? GET_MODE (trueop0)
3206 : GET_MODE_INNER (mode));
3207 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3208 ? GET_MODE (trueop1)
3209 : GET_MODE_INNER (mode));
3211 gcc_assert (VECTOR_MODE_P (mode));
3212 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3213 == GET_MODE_SIZE (mode));
3215 if (VECTOR_MODE_P (op0_mode))
3216 gcc_assert (GET_MODE_INNER (mode)
3217 == GET_MODE_INNER (op0_mode));
3218 else
3219 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3221 if (VECTOR_MODE_P (op1_mode))
3222 gcc_assert (GET_MODE_INNER (mode)
3223 == GET_MODE_INNER (op1_mode));
3224 else
3225 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3227 if ((GET_CODE (trueop0) == CONST_VECTOR
3228 || CONST_INT_P (trueop0)
3229 || GET_CODE (trueop0) == CONST_DOUBLE)
3230 && (GET_CODE (trueop1) == CONST_VECTOR
3231 || CONST_INT_P (trueop1)
3232 || GET_CODE (trueop1) == CONST_DOUBLE))
3234 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3235 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3236 rtvec v = rtvec_alloc (n_elts);
3237 unsigned int i;
3238 unsigned in_n_elts = 1;
3240 if (VECTOR_MODE_P (op0_mode))
3241 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3242 for (i = 0; i < n_elts; i++)
3244 if (i < in_n_elts)
3246 if (!VECTOR_MODE_P (op0_mode))
3247 RTVEC_ELT (v, i) = trueop0;
3248 else
3249 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3251 else
3253 if (!VECTOR_MODE_P (op1_mode))
3254 RTVEC_ELT (v, i) = trueop1;
3255 else
3256 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3257 i - in_n_elts);
3261 return gen_rtx_CONST_VECTOR (mode, v);
3264 return 0;
3266 default:
3267 gcc_unreachable ();
3270 return 0;
3274 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3275 rtx op0, rtx op1)
3277 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3278 HOST_WIDE_INT val;
3279 unsigned int width = GET_MODE_BITSIZE (mode);
3281 if (VECTOR_MODE_P (mode)
3282 && code != VEC_CONCAT
3283 && GET_CODE (op0) == CONST_VECTOR
3284 && GET_CODE (op1) == CONST_VECTOR)
3286 unsigned n_elts = GET_MODE_NUNITS (mode);
3287 enum machine_mode op0mode = GET_MODE (op0);
3288 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3289 enum machine_mode op1mode = GET_MODE (op1);
3290 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3291 rtvec v = rtvec_alloc (n_elts);
3292 unsigned int i;
3294 gcc_assert (op0_n_elts == n_elts);
3295 gcc_assert (op1_n_elts == n_elts);
3296 for (i = 0; i < n_elts; i++)
3298 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3299 CONST_VECTOR_ELT (op0, i),
3300 CONST_VECTOR_ELT (op1, i));
3301 if (!x)
3302 return 0;
3303 RTVEC_ELT (v, i) = x;
3306 return gen_rtx_CONST_VECTOR (mode, v);
3309 if (VECTOR_MODE_P (mode)
3310 && code == VEC_CONCAT
3311 && (CONST_INT_P (op0)
3312 || GET_CODE (op0) == CONST_DOUBLE
3313 || GET_CODE (op0) == CONST_FIXED)
3314 && (CONST_INT_P (op1)
3315 || GET_CODE (op1) == CONST_DOUBLE
3316 || GET_CODE (op1) == CONST_FIXED))
3318 unsigned n_elts = GET_MODE_NUNITS (mode);
3319 rtvec v = rtvec_alloc (n_elts);
3321 gcc_assert (n_elts >= 2);
3322 if (n_elts == 2)
3324 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3325 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3327 RTVEC_ELT (v, 0) = op0;
3328 RTVEC_ELT (v, 1) = op1;
3330 else
3332 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3333 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3334 unsigned i;
3336 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3337 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3338 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3340 for (i = 0; i < op0_n_elts; ++i)
3341 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3342 for (i = 0; i < op1_n_elts; ++i)
3343 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3346 return gen_rtx_CONST_VECTOR (mode, v);
3349 if (SCALAR_FLOAT_MODE_P (mode)
3350 && GET_CODE (op0) == CONST_DOUBLE
3351 && GET_CODE (op1) == CONST_DOUBLE
3352 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3354 if (code == AND
3355 || code == IOR
3356 || code == XOR)
3358 long tmp0[4];
3359 long tmp1[4];
3360 REAL_VALUE_TYPE r;
3361 int i;
3363 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3364 GET_MODE (op0));
3365 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3366 GET_MODE (op1));
3367 for (i = 0; i < 4; i++)
3369 switch (code)
3371 case AND:
3372 tmp0[i] &= tmp1[i];
3373 break;
3374 case IOR:
3375 tmp0[i] |= tmp1[i];
3376 break;
3377 case XOR:
3378 tmp0[i] ^= tmp1[i];
3379 break;
3380 default:
3381 gcc_unreachable ();
3384 real_from_target (&r, tmp0, mode);
3385 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3387 else
3389 REAL_VALUE_TYPE f0, f1, value, result;
3390 bool inexact;
3392 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3393 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3394 real_convert (&f0, mode, &f0);
3395 real_convert (&f1, mode, &f1);
3397 if (HONOR_SNANS (mode)
3398 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3399 return 0;
3401 if (code == DIV
3402 && REAL_VALUES_EQUAL (f1, dconst0)
3403 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3404 return 0;
3406 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3407 && flag_trapping_math
3408 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3410 int s0 = REAL_VALUE_NEGATIVE (f0);
3411 int s1 = REAL_VALUE_NEGATIVE (f1);
3413 switch (code)
3415 case PLUS:
3416 /* Inf + -Inf = NaN plus exception. */
3417 if (s0 != s1)
3418 return 0;
3419 break;
3420 case MINUS:
3421 /* Inf - Inf = NaN plus exception. */
3422 if (s0 == s1)
3423 return 0;
3424 break;
3425 case DIV:
3426 /* Inf / Inf = NaN plus exception. */
3427 return 0;
3428 default:
3429 break;
3433 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3434 && flag_trapping_math
3435 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3436 || (REAL_VALUE_ISINF (f1)
3437 && REAL_VALUES_EQUAL (f0, dconst0))))
3438 /* Inf * 0 = NaN plus exception. */
3439 return 0;
3441 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3442 &f0, &f1);
3443 real_convert (&result, mode, &value);
3445 /* Don't constant fold this floating point operation if
3446 the result has overflowed and flag_trapping_math is set. */
3448 if (flag_trapping_math
3449 && MODE_HAS_INFINITIES (mode)
3450 && REAL_VALUE_ISINF (result)
3451 && !REAL_VALUE_ISINF (f0)
3452 && !REAL_VALUE_ISINF (f1))
3453 /* Overflow plus exception. */
3454 return 0;
3456 /* Don't constant fold this floating point operation if the
3457 result may depend upon the run-time rounding mode and
3458 flag_rounding_math is set, or if GCC's software emulation
3459 is unable to accurately represent the result. */
3461 if ((flag_rounding_math
3462 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3463 && (inexact || !real_identical (&result, &value)))
3464 return NULL_RTX;
3466 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3470 /* We can fold some multi-word operations. */
3471 if (GET_MODE_CLASS (mode) == MODE_INT
3472 && width == HOST_BITS_PER_DOUBLE_INT
3473 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3474 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3476 double_int o0, o1, res, tmp;
3478 o0 = rtx_to_double_int (op0);
3479 o1 = rtx_to_double_int (op1);
3481 switch (code)
3483 case MINUS:
3484 /* A - B == A + (-B). */
3485 o1 = double_int_neg (o1);
3487 /* Fall through.... */
3489 case PLUS:
3490 res = double_int_add (o0, o1);
3491 break;
3493 case MULT:
3494 res = double_int_mul (o0, o1);
3495 break;
3497 case DIV:
3498 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3499 o0.low, o0.high, o1.low, o1.high,
3500 &res.low, &res.high,
3501 &tmp.low, &tmp.high))
3502 return 0;
3503 break;
3505 case MOD:
3506 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3507 o0.low, o0.high, o1.low, o1.high,
3508 &tmp.low, &tmp.high,
3509 &res.low, &res.high))
3510 return 0;
3511 break;
3513 case UDIV:
3514 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3515 o0.low, o0.high, o1.low, o1.high,
3516 &res.low, &res.high,
3517 &tmp.low, &tmp.high))
3518 return 0;
3519 break;
3521 case UMOD:
3522 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3523 o0.low, o0.high, o1.low, o1.high,
3524 &tmp.low, &tmp.high,
3525 &res.low, &res.high))
3526 return 0;
3527 break;
3529 case AND:
3530 res = double_int_and (o0, o1);
3531 break;
3533 case IOR:
3534 res = double_int_ior (o0, o1);
3535 break;
3537 case XOR:
3538 res = double_int_xor (o0, o1);
3539 break;
3541 case SMIN:
3542 res = double_int_smin (o0, o1);
3543 break;
3545 case SMAX:
3546 res = double_int_smax (o0, o1);
3547 break;
3549 case UMIN:
3550 res = double_int_umin (o0, o1);
3551 break;
3553 case UMAX:
3554 res = double_int_umax (o0, o1);
3555 break;
3557 case LSHIFTRT: case ASHIFTRT:
3558 case ASHIFT:
3559 case ROTATE: case ROTATERT:
3561 unsigned HOST_WIDE_INT cnt;
3563 if (SHIFT_COUNT_TRUNCATED)
3564 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3566 if (!double_int_fits_in_uhwi_p (o1)
3567 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3568 return 0;
3570 cnt = double_int_to_uhwi (o1);
3572 if (code == LSHIFTRT || code == ASHIFTRT)
3573 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3574 code == ASHIFTRT);
3575 else if (code == ASHIFT)
3576 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3577 true);
3578 else if (code == ROTATE)
3579 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3580 else /* code == ROTATERT */
3581 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3583 break;
3585 default:
3586 return 0;
3589 return immed_double_int_const (res, mode);
3592 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3593 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3595 /* Get the integer argument values in two forms:
3596 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3598 arg0 = INTVAL (op0);
3599 arg1 = INTVAL (op1);
3601 if (width < HOST_BITS_PER_WIDE_INT)
3603 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3604 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3606 arg0s = arg0;
3607 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3608 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3610 arg1s = arg1;
3611 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3612 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3614 else
3616 arg0s = arg0;
3617 arg1s = arg1;
3620 /* Compute the value of the arithmetic. */
3622 switch (code)
3624 case PLUS:
3625 val = arg0s + arg1s;
3626 break;
3628 case MINUS:
3629 val = arg0s - arg1s;
3630 break;
3632 case MULT:
3633 val = arg0s * arg1s;
3634 break;
3636 case DIV:
3637 if (arg1s == 0
3638 || ((unsigned HOST_WIDE_INT) arg0s
3639 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3640 && arg1s == -1))
3641 return 0;
3642 val = arg0s / arg1s;
3643 break;
3645 case MOD:
3646 if (arg1s == 0
3647 || ((unsigned HOST_WIDE_INT) arg0s
3648 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3649 && arg1s == -1))
3650 return 0;
3651 val = arg0s % arg1s;
3652 break;
3654 case UDIV:
3655 if (arg1 == 0
3656 || ((unsigned HOST_WIDE_INT) arg0s
3657 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3658 && arg1s == -1))
3659 return 0;
3660 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3661 break;
3663 case UMOD:
3664 if (arg1 == 0
3665 || ((unsigned HOST_WIDE_INT) arg0s
3666 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3667 && arg1s == -1))
3668 return 0;
3669 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3670 break;
3672 case AND:
3673 val = arg0 & arg1;
3674 break;
3676 case IOR:
3677 val = arg0 | arg1;
3678 break;
3680 case XOR:
3681 val = arg0 ^ arg1;
3682 break;
3684 case LSHIFTRT:
3685 case ASHIFT:
3686 case ASHIFTRT:
3687 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3688 the value is in range. We can't return any old value for
3689 out-of-range arguments because either the middle-end (via
3690 shift_truncation_mask) or the back-end might be relying on
3691 target-specific knowledge. Nor can we rely on
3692 shift_truncation_mask, since the shift might not be part of an
3693 ashlM3, lshrM3 or ashrM3 instruction. */
3694 if (SHIFT_COUNT_TRUNCATED)
3695 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3696 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3697 return 0;
3699 val = (code == ASHIFT
3700 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3701 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3703 /* Sign-extend the result for arithmetic right shifts. */
3704 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3705 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3706 break;
3708 case ROTATERT:
3709 if (arg1 < 0)
3710 return 0;
3712 arg1 %= width;
3713 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3714 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3715 break;
3717 case ROTATE:
3718 if (arg1 < 0)
3719 return 0;
3721 arg1 %= width;
3722 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3723 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3724 break;
3726 case COMPARE:
3727 /* Do nothing here. */
3728 return 0;
3730 case SMIN:
3731 val = arg0s <= arg1s ? arg0s : arg1s;
3732 break;
3734 case UMIN:
3735 val = ((unsigned HOST_WIDE_INT) arg0
3736 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3737 break;
3739 case SMAX:
3740 val = arg0s > arg1s ? arg0s : arg1s;
3741 break;
3743 case UMAX:
3744 val = ((unsigned HOST_WIDE_INT) arg0
3745 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3746 break;
3748 case SS_PLUS:
3749 case US_PLUS:
3750 case SS_MINUS:
3751 case US_MINUS:
3752 case SS_MULT:
3753 case US_MULT:
3754 case SS_DIV:
3755 case US_DIV:
3756 case SS_ASHIFT:
3757 case US_ASHIFT:
3758 /* ??? There are simplifications that can be done. */
3759 return 0;
3761 default:
3762 gcc_unreachable ();
3765 return gen_int_mode (val, mode);
3768 return NULL_RTX;
3773 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3774 PLUS or MINUS.
3776 Rather than test for specific cases, we do this by a brute-force method
3777 and do all possible simplifications until no more changes occur. Then
3778 we rebuild the operation. */
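/* Roughly, (plus (minus a b) (plus b c)) is first flattened into the
   operand list {a, -b, b, c}; the pairwise pass then cancels b against -b
   and the remaining operands are rebuilt as (plus a c).  */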
3780 struct simplify_plus_minus_op_data
3782 rtx op;
3783 short neg;
3786 static bool
3787 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3789 int result;
3791 result = (commutative_operand_precedence (y)
3792 - commutative_operand_precedence (x));
3793 if (result)
3794 return result > 0;
3796 /* Group together equal REGs to do more simplification. */
3797 if (REG_P (x) && REG_P (y))
3798 return REGNO (x) > REGNO (y);
3799 else
3800 return false;
3803 static rtx
3804 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3805 rtx op1)
3807 struct simplify_plus_minus_op_data ops[8];
3808 rtx result, tem;
3809 int n_ops = 2, input_ops = 2;
3810 int changed, n_constants = 0, canonicalized = 0;
3811 int i, j;
3813 memset (ops, 0, sizeof ops);
3815 /* Set up the two operands and then expand them until nothing has been
3816 changed. If we run out of room in our array, give up; this should
3817 almost never happen. */
3819 ops[0].op = op0;
3820 ops[0].neg = 0;
3821 ops[1].op = op1;
3822 ops[1].neg = (code == MINUS);
3826 changed = 0;
3828 for (i = 0; i < n_ops; i++)
3830 rtx this_op = ops[i].op;
3831 int this_neg = ops[i].neg;
3832 enum rtx_code this_code = GET_CODE (this_op);
3834 switch (this_code)
3836 case PLUS:
3837 case MINUS:
3838 if (n_ops == 7)
3839 return NULL_RTX;
3841 ops[n_ops].op = XEXP (this_op, 1);
3842 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3843 n_ops++;
3845 ops[i].op = XEXP (this_op, 0);
3846 input_ops++;
3847 changed = 1;
3848 canonicalized |= this_neg;
3849 break;
3851 case NEG:
3852 ops[i].op = XEXP (this_op, 0);
3853 ops[i].neg = ! this_neg;
3854 changed = 1;
3855 canonicalized = 1;
3856 break;
3858 case CONST:
3859 if (n_ops < 7
3860 && GET_CODE (XEXP (this_op, 0)) == PLUS
3861 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3862 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3864 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3865 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3866 ops[n_ops].neg = this_neg;
3867 n_ops++;
3868 changed = 1;
3869 canonicalized = 1;
3871 break;
3873 case NOT:
3874 /* ~a -> (-a - 1) */
3875 if (n_ops != 7)
3877 ops[n_ops].op = constm1_rtx;
3878 ops[n_ops++].neg = this_neg;
3879 ops[i].op = XEXP (this_op, 0);
3880 ops[i].neg = !this_neg;
3881 changed = 1;
3882 canonicalized = 1;
3884 break;
3886 case CONST_INT:
3887 n_constants++;
3888 if (this_neg)
3890 ops[i].op = neg_const_int (mode, this_op);
3891 ops[i].neg = 0;
3892 changed = 1;
3893 canonicalized = 1;
3895 break;
3897 default:
3898 break;
3902 while (changed);
3904 if (n_constants > 1)
3905 canonicalized = 1;
3907 gcc_assert (n_ops >= 2);
3909 /* If we only have two operands, we can avoid the loops. */
3910 if (n_ops == 2)
3912 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3913 rtx lhs, rhs;
3915 /* Get the two operands. Be careful with the order, especially for
3916 the cases where code == MINUS. */
3917 if (ops[0].neg && ops[1].neg)
3919 lhs = gen_rtx_NEG (mode, ops[0].op);
3920 rhs = ops[1].op;
3922 else if (ops[0].neg)
3924 lhs = ops[1].op;
3925 rhs = ops[0].op;
3927 else
3929 lhs = ops[0].op;
3930 rhs = ops[1].op;
3933 return simplify_const_binary_operation (code, mode, lhs, rhs);
3936 /* Now simplify each pair of operands until nothing changes. */
3939 /* Insertion sort is good enough for an eight-element array. */
3940 for (i = 1; i < n_ops; i++)
3942 struct simplify_plus_minus_op_data save;
3943 j = i - 1;
3944 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3945 continue;
3947 canonicalized = 1;
3948 save = ops[i];
3950 ops[j + 1] = ops[j];
3951 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3952 ops[j + 1] = save;
3955 changed = 0;
3956 for (i = n_ops - 1; i > 0; i--)
3957 for (j = i - 1; j >= 0; j--)
3959 rtx lhs = ops[j].op, rhs = ops[i].op;
3960 int lneg = ops[j].neg, rneg = ops[i].neg;
3962 if (lhs != 0 && rhs != 0)
3964 enum rtx_code ncode = PLUS;
3966 if (lneg != rneg)
3968 ncode = MINUS;
3969 if (lneg)
3970 tem = lhs, lhs = rhs, rhs = tem;
3972 else if (swap_commutative_operands_p (lhs, rhs))
3973 tem = lhs, lhs = rhs, rhs = tem;
3975 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3976 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3978 rtx tem_lhs, tem_rhs;
3980 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3981 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3982 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3984 if (tem && !CONSTANT_P (tem))
3985 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3987 else
3988 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3990 /* Reject "simplifications" that just wrap the two
3991 arguments in a CONST. Failure to do so can result
3992 in infinite recursion with simplify_binary_operation
3993 when it calls us to simplify CONST operations. */
3994 if (tem
3995 && ! (GET_CODE (tem) == CONST
3996 && GET_CODE (XEXP (tem, 0)) == ncode
3997 && XEXP (XEXP (tem, 0), 0) == lhs
3998 && XEXP (XEXP (tem, 0), 1) == rhs))
4000 lneg &= rneg;
4001 if (GET_CODE (tem) == NEG)
4002 tem = XEXP (tem, 0), lneg = !lneg;
4003 if (CONST_INT_P (tem) && lneg)
4004 tem = neg_const_int (mode, tem), lneg = 0;
4006 ops[i].op = tem;
4007 ops[i].neg = lneg;
4008 ops[j].op = NULL_RTX;
4009 changed = 1;
4010 canonicalized = 1;
4015 /* If nothing changed, fail. */
4016 if (!canonicalized)
4017 return NULL_RTX;
4019 /* Pack all the operands to the lower-numbered entries. */
4020 for (i = 0, j = 0; j < n_ops; j++)
4021 if (ops[j].op)
4023 ops[i] = ops[j];
4024 i++;
4026 n_ops = i;
4028 while (changed);
4030 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4031 if (n_ops == 2
4032 && CONST_INT_P (ops[1].op)
4033 && CONSTANT_P (ops[0].op)
4034 && ops[0].neg)
4035 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4037 /* We suppressed creation of trivial CONST expressions in the
4038 combination loop to avoid recursion. Create one manually now.
4039 The combination loop should have ensured that there is exactly
4040 one CONST_INT, and the sort will have ensured that it is last
4041 in the array and that any other constant will be next-to-last. */
4043 if (n_ops > 1
4044 && CONST_INT_P (ops[n_ops - 1].op)
4045 && CONSTANT_P (ops[n_ops - 2].op))
4047 rtx value = ops[n_ops - 1].op;
4048 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4049 value = neg_const_int (mode, value);
4050 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4051 n_ops--;
4054 /* Put a non-negated operand first, if possible. */
4056 for (i = 0; i < n_ops && ops[i].neg; i++)
4057 continue;
4058 if (i == n_ops)
4059 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4060 else if (i != 0)
4062 tem = ops[0].op;
4063 ops[0] = ops[i];
4064 ops[i].op = tem;
4065 ops[i].neg = 1;
4068 /* Now make the result by performing the requested operations. */
4069 result = ops[0].op;
4070 for (i = 1; i < n_ops; i++)
4071 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4072 mode, result, ops[i].op);
4074 return result;
4077 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4078 static bool
4079 plus_minus_operand_p (const_rtx x)
4081 return GET_CODE (x) == PLUS
4082 || GET_CODE (x) == MINUS
4083 || (GET_CODE (x) == CONST
4084 && GET_CODE (XEXP (x, 0)) == PLUS
4085 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4086 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
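/* For illustration: an operand such as
     (const (plus (symbol_ref "foo") (const_int 4)))
   satisfies this predicate, whereas a bare SYMBOL_REF or REG does not,
   so only genuine sums and differences reach simplify_plus_minus.
   ("foo" is just a placeholder name.)  */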
4089 /* Like simplify_binary_operation except used for relational operators.
4090 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4091 not both be VOIDmode.
4093 CMP_MODE specifies the mode in which the comparison is done, so it is
4094 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4095 the operands or, if both are VOIDmode, the operands are compared in
4096 "infinite precision". */
4098 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4099 enum machine_mode cmp_mode, rtx op0, rtx op1)
4101 rtx tem, trueop0, trueop1;
4103 if (cmp_mode == VOIDmode)
4104 cmp_mode = GET_MODE (op0);
4105 if (cmp_mode == VOIDmode)
4106 cmp_mode = GET_MODE (op1);
4108 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4109 if (tem)
4111 if (SCALAR_FLOAT_MODE_P (mode))
4113 if (tem == const0_rtx)
4114 return CONST0_RTX (mode);
4115 #ifdef FLOAT_STORE_FLAG_VALUE
4117 REAL_VALUE_TYPE val;
4118 val = FLOAT_STORE_FLAG_VALUE (mode);
4119 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4121 #else
4122 return NULL_RTX;
4123 #endif
4125 if (VECTOR_MODE_P (mode))
4127 if (tem == const0_rtx)
4128 return CONST0_RTX (mode);
4129 #ifdef VECTOR_STORE_FLAG_VALUE
4131 int i, units;
4132 rtvec v;
4134 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4135 if (val == NULL_RTX)
4136 return NULL_RTX;
4137 if (val == const1_rtx)
4138 return CONST1_RTX (mode);
4140 units = GET_MODE_NUNITS (mode);
4141 v = rtvec_alloc (units);
4142 for (i = 0; i < units; i++)
4143 RTVEC_ELT (v, i) = val;
4144 return gen_rtx_raw_CONST_VECTOR (mode, v);
4146 #else
4147 return NULL_RTX;
4148 #endif
4151 return tem;
4154 /* For the following tests, ensure const0_rtx is op1. */
4155 if (swap_commutative_operands_p (op0, op1)
4156 || (op0 == const0_rtx && op1 != const0_rtx))
4157 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4159 /* If op0 is a compare, extract the comparison arguments from it. */
4160 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4161 return simplify_gen_relational (code, mode, VOIDmode,
4162 XEXP (op0, 0), XEXP (op0, 1));
4164 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4165 || CC0_P (op0))
4166 return NULL_RTX;
4168 trueop0 = avoid_constant_pool_reference (op0);
4169 trueop1 = avoid_constant_pool_reference (op1);
4170 return simplify_relational_operation_1 (code, mode, cmp_mode,
4171 trueop0, trueop1);
4174 /* This part of simplify_relational_operation is only used when CMP_MODE
4175 is not in class MODE_CC (i.e. it is a real comparison).
4177 MODE is the mode of the result, while CMP_MODE specifies the mode
4178 in which the comparison is done, so it is the mode of the operands. */
4180 static rtx
4181 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4182 enum machine_mode cmp_mode, rtx op0, rtx op1)
4184 enum rtx_code op0code = GET_CODE (op0);
4186 if (op1 == const0_rtx && COMPARISON_P (op0))
4188 /* If op0 is a comparison, extract the comparison arguments
4189 from it. */
4190 if (code == NE)
4192 if (GET_MODE (op0) == mode)
4193 return simplify_rtx (op0);
4194 else
4195 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4196 XEXP (op0, 0), XEXP (op0, 1));
4198 else if (code == EQ)
4200 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4201 if (new_code != UNKNOWN)
4202 return simplify_gen_relational (new_code, mode, VOIDmode,
4203 XEXP (op0, 0), XEXP (op0, 1));
4207 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4208 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4209 if ((code == LTU || code == GEU)
4210 && GET_CODE (op0) == PLUS
4211 && CONST_INT_P (XEXP (op0, 1))
4212 && (rtx_equal_p (op1, XEXP (op0, 0))
4213 || rtx_equal_p (op1, XEXP (op0, 1))))
4215 rtx new_cmp
4216 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4217 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4218 cmp_mode, XEXP (op0, 0), new_cmp);
4221 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4222 if ((code == LTU || code == GEU)
4223 && GET_CODE (op0) == PLUS
4224 && rtx_equal_p (op1, XEXP (op0, 1))
4225 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4226 && !rtx_equal_p (op1, XEXP (op0, 0)))
4227 return simplify_gen_relational (code, mode, cmp_mode, op0,
4228 copy_rtx (XEXP (op0, 0)));
4230 if (op1 == const0_rtx)
4232 /* Canonicalize (GTU x 0) as (NE x 0). */
4233 if (code == GTU)
4234 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4235 /* Canonicalize (LEU x 0) as (EQ x 0). */
4236 if (code == LEU)
4237 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4239 else if (op1 == const1_rtx)
4241 switch (code)
4243 case GE:
4244 /* Canonicalize (GE x 1) as (GT x 0). */
4245 return simplify_gen_relational (GT, mode, cmp_mode,
4246 op0, const0_rtx);
4247 case GEU:
4248 /* Canonicalize (GEU x 1) as (NE x 0). */
4249 return simplify_gen_relational (NE, mode, cmp_mode,
4250 op0, const0_rtx);
4251 case LT:
4252 /* Canonicalize (LT x 1) as (LE x 0). */
4253 return simplify_gen_relational (LE, mode, cmp_mode,
4254 op0, const0_rtx);
4255 case LTU:
4256 /* Canonicalize (LTU x 1) as (EQ x 0). */
4257 return simplify_gen_relational (EQ, mode, cmp_mode,
4258 op0, const0_rtx);
4259 default:
4260 break;
4263 else if (op1 == constm1_rtx)
4265 /* Canonicalize (LE x -1) as (LT x 0). */
4266 if (code == LE)
4267 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4268 /* Canonicalize (GT x -1) as (GE x 0). */
4269 if (code == GT)
4270 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4273 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4274 if ((code == EQ || code == NE)
4275 && (op0code == PLUS || op0code == MINUS)
4276 && CONSTANT_P (op1)
4277 && CONSTANT_P (XEXP (op0, 1))
4278 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4280 rtx x = XEXP (op0, 0);
4281 rtx c = XEXP (op0, 1);
4283 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4284 cmp_mode, op1, c);
4285 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4288 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4289 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4290 if (code == NE
4291 && op1 == const0_rtx
4292 && GET_MODE_CLASS (mode) == MODE_INT
4293 && cmp_mode != VOIDmode
4294 /* ??? Work-around BImode bugs in the ia64 backend. */
4295 && mode != BImode
4296 && cmp_mode != BImode
4297 && nonzero_bits (op0, cmp_mode) == 1
4298 && STORE_FLAG_VALUE == 1)
4299 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4300 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4301 : lowpart_subreg (mode, op0, cmp_mode);
4303 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4304 if ((code == EQ || code == NE)
4305 && op1 == const0_rtx
4306 && op0code == XOR)
4307 return simplify_gen_relational (code, mode, cmp_mode,
4308 XEXP (op0, 0), XEXP (op0, 1));
4310 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4311 if ((code == EQ || code == NE)
4312 && op0code == XOR
4313 && rtx_equal_p (XEXP (op0, 0), op1)
4314 && !side_effects_p (XEXP (op0, 0)))
4315 return simplify_gen_relational (code, mode, cmp_mode,
4316 XEXP (op0, 1), const0_rtx);
4318 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4319 if ((code == EQ || code == NE)
4320 && op0code == XOR
4321 && rtx_equal_p (XEXP (op0, 1), op1)
4322 && !side_effects_p (XEXP (op0, 1)))
4323 return simplify_gen_relational (code, mode, cmp_mode,
4324 XEXP (op0, 0), const0_rtx);
4326 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4327 if ((code == EQ || code == NE)
4328 && op0code == XOR
4329 && (CONST_INT_P (op1)
4330 || GET_CODE (op1) == CONST_DOUBLE)
4331 && (CONST_INT_P (XEXP (op0, 1))
4332 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4333 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4334 simplify_gen_binary (XOR, cmp_mode,
4335 XEXP (op0, 1), op1));
4337 if (op0code == POPCOUNT && op1 == const0_rtx)
4338 switch (code)
4340 case EQ:
4341 case LE:
4342 case LEU:
4343 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4344 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4345 XEXP (op0, 0), const0_rtx);
4347 case NE:
4348 case GT:
4349 case GTU:
4350 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4351 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4352 XEXP (op0, 0), const0_rtx);
4354 default:
4355 break;
4358 return NULL_RTX;
4361 enum
4363 CMP_EQ = 1,
4364 CMP_LT = 2,
4365 CMP_GT = 4,
4366 CMP_LTU = 8,
4367 CMP_GTU = 16
4371 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4372 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4373 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4374 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4375 For floating-point comparisons, assume that the operands were ordered. */
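/* For example, with KNOWN_RESULT == (CMP_GT | CMP_GTU), meaning operand 0
   is known to be greater both signed and unsigned, GT and GTU map to
   const_true_rtx while EQ, LE and LEU map to const0_rtx.  */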
4377 static rtx
4378 comparison_result (enum rtx_code code, int known_results)
4380 switch (code)
4382 case EQ:
4383 case UNEQ:
4384 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4385 case NE:
4386 case LTGT:
4387 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4389 case LT:
4390 case UNLT:
4391 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4392 case GE:
4393 case UNGE:
4394 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4396 case GT:
4397 case UNGT:
4398 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4399 case LE:
4400 case UNLE:
4401 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4403 case LTU:
4404 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4405 case GEU:
4406 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4408 case GTU:
4409 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4410 case LEU:
4411 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4413 case ORDERED:
4414 return const_true_rtx;
4415 case UNORDERED:
4416 return const0_rtx;
4417 default:
4418 gcc_unreachable ();
4422 /* Check if the given comparison (done in the given MODE) is actually a
4423 tautology or a contradiction.
4424 If no simplification is possible, this function returns zero.
4425 Otherwise, it returns either const_true_rtx or const0_rtx. */
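/* For instance, (ltu:SI (reg:SI x) (const_int 0)) is a contradiction --
   no unsigned value is less than zero -- and the bounds check below folds
   it to const0_rtx, while (geu:SI (reg:SI x) (const_int 0)) is a tautology
   and folds to const_true_rtx.  */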
4428 simplify_const_relational_operation (enum rtx_code code,
4429 enum machine_mode mode,
4430 rtx op0, rtx op1)
4432 rtx tem;
4433 rtx trueop0;
4434 rtx trueop1;
4436 gcc_assert (mode != VOIDmode
4437 || (GET_MODE (op0) == VOIDmode
4438 && GET_MODE (op1) == VOIDmode));
4440 /* If op0 is a compare, extract the comparison arguments from it. */
4441 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4443 op1 = XEXP (op0, 1);
4444 op0 = XEXP (op0, 0);
4446 if (GET_MODE (op0) != VOIDmode)
4447 mode = GET_MODE (op0);
4448 else if (GET_MODE (op1) != VOIDmode)
4449 mode = GET_MODE (op1);
4450 else
4451 return 0;
4454 /* We can't simplify MODE_CC values since we don't know what the
4455 actual comparison is. */
4456 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4457 return 0;
4459 /* Make sure the constant is second. */
4460 if (swap_commutative_operands_p (op0, op1))
4462 tem = op0, op0 = op1, op1 = tem;
4463 code = swap_condition (code);
4466 trueop0 = avoid_constant_pool_reference (op0);
4467 trueop1 = avoid_constant_pool_reference (op1);
4469 /* For integer comparisons of A and B, we may be able to simplify A - B
4470 and then simplify a comparison of that with zero. If A and B are both either
4471 a register or a CONST_INT, this can't help; testing for these cases will
4472 prevent infinite recursion here and speed things up.
4474 We can only do this for EQ and NE comparisons as otherwise we may
4475 lose or introduce overflow which we cannot disregard as undefined as
4476 we do not know the signedness of the operation on either the left or
4477 the right hand side of the comparison. */
4479 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4480 && (code == EQ || code == NE)
4481 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4482 && (REG_P (op1) || CONST_INT_P (trueop1)))
4483 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4484 /* We cannot do this if tem is a nonzero address. */
4485 && ! nonzero_address_p (tem))
4486 return simplify_const_relational_operation (signed_condition (code),
4487 mode, tem, const0_rtx);
4489 if (! HONOR_NANS (mode) && code == ORDERED)
4490 return const_true_rtx;
4492 if (! HONOR_NANS (mode) && code == UNORDERED)
4493 return const0_rtx;
4495 /* For modes without NaNs, if the two operands are equal, we know the
4496 result except if they have side-effects. Even with NaNs we know
4497 the result of unordered comparisons and, if signaling NaNs are
4498 irrelevant, also the result of LT/GT/LTGT. */
4499 if ((! HONOR_NANS (GET_MODE (trueop0))
4500 || code == UNEQ || code == UNLE || code == UNGE
4501 || ((code == LT || code == GT || code == LTGT)
4502 && ! HONOR_SNANS (GET_MODE (trueop0))))
4503 && rtx_equal_p (trueop0, trueop1)
4504 && ! side_effects_p (trueop0))
4505 return comparison_result (code, CMP_EQ);
4507 /* If the operands are floating-point constants, see if we can fold
4508 the result. */
4509 if (GET_CODE (trueop0) == CONST_DOUBLE
4510 && GET_CODE (trueop1) == CONST_DOUBLE
4511 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4513 REAL_VALUE_TYPE d0, d1;
4515 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4516 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4518 /* Comparisons are unordered iff at least one of the values is NaN. */
4519 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4520 switch (code)
4522 case UNEQ:
4523 case UNLT:
4524 case UNGT:
4525 case UNLE:
4526 case UNGE:
4527 case NE:
4528 case UNORDERED:
4529 return const_true_rtx;
4530 case EQ:
4531 case LT:
4532 case GT:
4533 case LE:
4534 case GE:
4535 case LTGT:
4536 case ORDERED:
4537 return const0_rtx;
4538 default:
4539 return 0;
4542 return comparison_result (code,
4543 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4544 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4547 /* Otherwise, see if the operands are both integers. */
4548 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4549 && (GET_CODE (trueop0) == CONST_DOUBLE
4550 || CONST_INT_P (trueop0))
4551 && (GET_CODE (trueop1) == CONST_DOUBLE
4552 || CONST_INT_P (trueop1)))
4554 int width = GET_MODE_BITSIZE (mode);
4555 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4556 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4558 /* Get the two words comprising each integer constant. */
4559 if (GET_CODE (trueop0) == CONST_DOUBLE)
4561 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4562 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4564 else
4566 l0u = l0s = INTVAL (trueop0);
4567 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4570 if (GET_CODE (trueop1) == CONST_DOUBLE)
4572 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4573 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4575 else
4577 l1u = l1s = INTVAL (trueop1);
4578 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4581 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4582 we have to sign or zero-extend the values. */
4583 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4585 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4586 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4588 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4589 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4591 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4592 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4594 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4595 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4597 if (h0u == h1u && l0u == l1u)
4598 return comparison_result (code, CMP_EQ);
4599 else
4601 int cr;
4602 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4603 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4604 return comparison_result (code, cr);
4608 /* Optimize comparisons with upper and lower bounds. */
4609 if (SCALAR_INT_MODE_P (mode)
4610 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4611 && CONST_INT_P (trueop1))
4613 int sign;
4614 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4615 HOST_WIDE_INT val = INTVAL (trueop1);
4616 HOST_WIDE_INT mmin, mmax;
4618 if (code == GEU
4619 || code == LEU
4620 || code == GTU
4621 || code == LTU)
4622 sign = 0;
4623 else
4624 sign = 1;
4626 /* Get a reduced range if the sign bit is zero. */
4627 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4629 mmin = 0;
4630 mmax = nonzero;
4632 else
4634 rtx mmin_rtx, mmax_rtx;
4635 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4637 mmin = INTVAL (mmin_rtx);
4638 mmax = INTVAL (mmax_rtx);
4639 if (sign)
4641 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4643 mmin >>= (sign_copies - 1);
4644 mmax >>= (sign_copies - 1);
4648 switch (code)
4650 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4651 case GEU:
4652 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4653 return const_true_rtx;
4654 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4655 return const0_rtx;
4656 break;
4657 case GE:
4658 if (val <= mmin)
4659 return const_true_rtx;
4660 if (val > mmax)
4661 return const0_rtx;
4662 break;
4664 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4665 case LEU:
4666 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4667 return const_true_rtx;
4668 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4669 return const0_rtx;
4670 break;
4671 case LE:
4672 if (val >= mmax)
4673 return const_true_rtx;
4674 if (val < mmin)
4675 return const0_rtx;
4676 break;
4678 case EQ:
4679 /* x == y is always false for y out of range. */
4680 if (val < mmin || val > mmax)
4681 return const0_rtx;
4682 break;
4684 /* x > y is always false for y >= mmax, always true for y < mmin. */
4685 case GTU:
4686 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4687 return const0_rtx;
4688 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4689 return const_true_rtx;
4690 break;
4691 case GT:
4692 if (val >= mmax)
4693 return const0_rtx;
4694 if (val < mmin)
4695 return const_true_rtx;
4696 break;
4698 /* x < y is always false for y <= mmin, always true for y > mmax. */
4699 case LTU:
4700 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4701 return const0_rtx;
4702 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4703 return const_true_rtx;
4704 break;
4705 case LT:
4706 if (val <= mmin)
4707 return const0_rtx;
4708 if (val > mmax)
4709 return const_true_rtx;
4710 break;
4712 case NE:
4713 /* x != y is always true for y out of range. */
4714 if (val < mmin || val > mmax)
4715 return const_true_rtx;
4716 break;
4718 default:
4719 break;
4723 /* Optimize integer comparisons with zero. */
4724 if (trueop1 == const0_rtx)
4726 /* Some addresses are known to be nonzero. We don't know
4727 their sign, but equality comparisons are known. */
4728 if (nonzero_address_p (trueop0))
4730 if (code == EQ || code == LEU)
4731 return const0_rtx;
4732 if (code == NE || code == GTU)
4733 return const_true_rtx;
4736 /* See if the first operand is an IOR with a constant. If so, we
4737 may be able to determine the result of this comparison. */
4738 if (GET_CODE (op0) == IOR)
4740 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4741 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4743 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4744 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4745 && (UINTVAL (inner_const)
4746 & ((unsigned HOST_WIDE_INT) 1
4747 << sign_bitnum)));
4749 switch (code)
4751 case EQ:
4752 case LEU:
4753 return const0_rtx;
4754 case NE:
4755 case GTU:
4756 return const_true_rtx;
4757 case LT:
4758 case LE:
4759 if (has_sign)
4760 return const_true_rtx;
4761 break;
4762 case GT:
4763 case GE:
4764 if (has_sign)
4765 return const0_rtx;
4766 break;
4767 default:
4768 break;
4774 /* Optimize comparison of ABS with zero. */
4775 if (trueop1 == CONST0_RTX (mode)
4776 && (GET_CODE (trueop0) == ABS
4777 || (GET_CODE (trueop0) == FLOAT_EXTEND
4778 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4780 switch (code)
4782 case LT:
4783 /* Optimize abs(x) < 0.0. */
4784 if (!HONOR_SNANS (mode)
4785 && (!INTEGRAL_MODE_P (mode)
4786 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4788 if (INTEGRAL_MODE_P (mode)
4789 && (issue_strict_overflow_warning
4790 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4791 warning (OPT_Wstrict_overflow,
4792 ("assuming signed overflow does not occur when "
4793 "assuming abs (x) < 0 is false"));
4794 return const0_rtx;
4796 break;
4798 case GE:
4799 /* Optimize abs(x) >= 0.0. */
4800 if (!HONOR_NANS (mode)
4801 && (!INTEGRAL_MODE_P (mode)
4802 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4804 if (INTEGRAL_MODE_P (mode)
4805 && (issue_strict_overflow_warning
4806 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4807 warning (OPT_Wstrict_overflow,
4808 ("assuming signed overflow does not occur when "
4809 "assuming abs (x) >= 0 is true"));
4810 return const_true_rtx;
4812 break;
4814 case UNGE:
4815 /* Optimize ! (abs(x) < 0.0). */
4816 return const_true_rtx;
4818 default:
4819 break;
4823 return 0;
4826 /* Simplify CODE, an operation with result mode MODE and three operands,
4827 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4828 a constant. Return 0 if no simplification is possible. */
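/* Two illustrative inputs: (if_then_else (const_int 1) A B) folds to A
   because the condition is a known constant, and a ZERO_EXTRACT of a
   CONST_INT is folded below to the extracted bit-field value.  */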
4831 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4832 enum machine_mode op0_mode, rtx op0, rtx op1,
4833 rtx op2)
4835 unsigned int width = GET_MODE_BITSIZE (mode);
4836 bool any_change = false;
4837 rtx tem;
4839 /* VOIDmode means "infinite" precision. */
4840 if (width == 0)
4841 width = HOST_BITS_PER_WIDE_INT;
4843 switch (code)
4845 case FMA:
4846 /* Simplify negations around the multiplication. */
4847 /* -a * -b + c => a * b + c. */
4848 if (GET_CODE (op0) == NEG)
4850 tem = simplify_unary_operation (NEG, mode, op1, mode);
4851 if (tem)
4852 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4854 else if (GET_CODE (op1) == NEG)
4856 tem = simplify_unary_operation (NEG, mode, op0, mode);
4857 if (tem)
4858 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4861 /* Canonicalize the two multiplication operands. */
4862 /* a * -b + c => -b * a + c. */
4863 if (swap_commutative_operands_p (op0, op1))
4864 tem = op0, op0 = op1, op1 = tem, any_change = true;
4866 if (any_change)
4867 return gen_rtx_FMA (mode, op0, op1, op2);
4868 return NULL_RTX;
4870 case SIGN_EXTRACT:
4871 case ZERO_EXTRACT:
4872 if (CONST_INT_P (op0)
4873 && CONST_INT_P (op1)
4874 && CONST_INT_P (op2)
4875 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4876 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4878 /* Extracting a bit-field from a constant. */
4879 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4881 if (BITS_BIG_ENDIAN)
4882 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4883 else
4884 val >>= INTVAL (op2);
4886 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4888 /* First zero-extend. */
4889 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4890 /* If desired, propagate sign bit. */
4891 if (code == SIGN_EXTRACT
4892 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4893 != 0)
4894 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4897 /* Clear the bits that don't belong in our mode,
4898 unless they and our sign bit are all one.
4899 So we get either a reasonable negative value or a reasonable
4900 unsigned value for this mode. */
4901 if (width < HOST_BITS_PER_WIDE_INT
4902 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4903 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4904 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4906 return gen_int_mode (val, mode);
4908 break;
4910 case IF_THEN_ELSE:
4911 if (CONST_INT_P (op0))
4912 return op0 != const0_rtx ? op1 : op2;
4914 /* Convert c ? a : a into "a". */
4915 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4916 return op1;
4918 /* Convert a != b ? a : b into "a". */
4919 if (GET_CODE (op0) == NE
4920 && ! side_effects_p (op0)
4921 && ! HONOR_NANS (mode)
4922 && ! HONOR_SIGNED_ZEROS (mode)
4923 && ((rtx_equal_p (XEXP (op0, 0), op1)
4924 && rtx_equal_p (XEXP (op0, 1), op2))
4925 || (rtx_equal_p (XEXP (op0, 0), op2)
4926 && rtx_equal_p (XEXP (op0, 1), op1))))
4927 return op1;
4929 /* Convert a == b ? a : b into "b". */
4930 if (GET_CODE (op0) == EQ
4931 && ! side_effects_p (op0)
4932 && ! HONOR_NANS (mode)
4933 && ! HONOR_SIGNED_ZEROS (mode)
4934 && ((rtx_equal_p (XEXP (op0, 0), op1)
4935 && rtx_equal_p (XEXP (op0, 1), op2))
4936 || (rtx_equal_p (XEXP (op0, 0), op2)
4937 && rtx_equal_p (XEXP (op0, 1), op1))))
4938 return op2;
4940 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4942 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4943 ? GET_MODE (XEXP (op0, 1))
4944 : GET_MODE (XEXP (op0, 0)));
4945 rtx temp;
4947 /* Look for happy constants in op1 and op2. */
4948 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4950 HOST_WIDE_INT t = INTVAL (op1);
4951 HOST_WIDE_INT f = INTVAL (op2);
4953 if (t == STORE_FLAG_VALUE && f == 0)
4954 code = GET_CODE (op0);
4955 else if (t == 0 && f == STORE_FLAG_VALUE)
4957 enum rtx_code tmp;
4958 tmp = reversed_comparison_code (op0, NULL_RTX);
4959 if (tmp == UNKNOWN)
4960 break;
4961 code = tmp;
4963 else
4964 break;
4966 return simplify_gen_relational (code, mode, cmp_mode,
4967 XEXP (op0, 0), XEXP (op0, 1));
4970 if (cmp_mode == VOIDmode)
4971 cmp_mode = op0_mode;
4972 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4973 cmp_mode, XEXP (op0, 0),
4974 XEXP (op0, 1));
4976 /* See if any simplifications were possible. */
4977 if (temp)
4979 if (CONST_INT_P (temp))
4980 return temp == const0_rtx ? op2 : op1;
4981 else if (temp)
4982 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4985 break;
4987 case VEC_MERGE:
4988 gcc_assert (GET_MODE (op0) == mode);
4989 gcc_assert (GET_MODE (op1) == mode);
4990 gcc_assert (VECTOR_MODE_P (mode));
4991 op2 = avoid_constant_pool_reference (op2);
4992 if (CONST_INT_P (op2))
4994 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4995 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4996 int mask = (1 << n_elts) - 1;
4998 if (!(INTVAL (op2) & mask))
4999 return op1;
5000 if ((INTVAL (op2) & mask) == mask)
5001 return op0;
5003 op0 = avoid_constant_pool_reference (op0);
5004 op1 = avoid_constant_pool_reference (op1);
5005 if (GET_CODE (op0) == CONST_VECTOR
5006 && GET_CODE (op1) == CONST_VECTOR)
5008 rtvec v = rtvec_alloc (n_elts);
5009 unsigned int i;
5011 for (i = 0; i < n_elts; i++)
5012 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5013 ? CONST_VECTOR_ELT (op0, i)
5014 : CONST_VECTOR_ELT (op1, i));
5015 return gen_rtx_CONST_VECTOR (mode, v);
5018 break;
5020 default:
5021 gcc_unreachable ();
5024 return 0;
5027 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5028 or CONST_VECTOR,
5029 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5031 Works by unpacking OP into a collection of 8-bit values
5032 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5033 and then repacking them again for OUTERMODE. */
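/* A small illustration (assuming a little-endian target, where byte 0 is
   least significant): taking the HImode lowpart of (const_int 0x12345678),
   i.e. outermode == HImode, innermode == SImode, byte == 0, unpacks the
   constant into the byte array 78 56 34 12 and repacks the first two
   bytes as (const_int 0x5678).  */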
5035 static rtx
5036 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5037 enum machine_mode innermode, unsigned int byte)
5039 /* We support up to 512-bit values (for V8DFmode). */
5040 enum {
5041 max_bitsize = 512,
5042 value_bit = 8,
5043 value_mask = (1 << value_bit) - 1
5045 unsigned char value[max_bitsize / value_bit];
5046 int value_start;
5047 int i;
5048 int elem;
5050 int num_elem;
5051 rtx * elems;
5052 int elem_bitsize;
5053 rtx result_s;
5054 rtvec result_v = NULL;
5055 enum mode_class outer_class;
5056 enum machine_mode outer_submode;
5058 /* Some ports misuse CCmode. */
5059 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5060 return op;
5062 /* We have no way to represent a complex constant at the rtl level. */
5063 if (COMPLEX_MODE_P (outermode))
5064 return NULL_RTX;
5066 /* Unpack the value. */
5068 if (GET_CODE (op) == CONST_VECTOR)
5070 num_elem = CONST_VECTOR_NUNITS (op);
5071 elems = &CONST_VECTOR_ELT (op, 0);
5072 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5074 else
5076 num_elem = 1;
5077 elems = &op;
5078 elem_bitsize = max_bitsize;
5080 /* If this asserts, it is too complicated; reducing value_bit may help. */
5081 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5082 /* I don't know how to handle endianness of sub-units. */
5083 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5085 for (elem = 0; elem < num_elem; elem++)
5087 unsigned char * vp;
5088 rtx el = elems[elem];
5090 /* Vectors are kept in target memory order. (This is probably
5091 a mistake.) */
5093 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5094 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5095 / BITS_PER_UNIT);
5096 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5097 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5098 unsigned bytele = (subword_byte % UNITS_PER_WORD
5099 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5100 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5103 switch (GET_CODE (el))
5105 case CONST_INT:
5106 for (i = 0;
5107 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5108 i += value_bit)
5109 *vp++ = INTVAL (el) >> i;
5110 /* CONST_INTs are always logically sign-extended. */
5111 for (; i < elem_bitsize; i += value_bit)
5112 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5113 break;
5115 case CONST_DOUBLE:
5116 if (GET_MODE (el) == VOIDmode)
5118 /* If this triggers, someone should have generated a
5119 CONST_INT instead. */
5120 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5122 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5123 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5124 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5126 *vp++
5127 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5128 i += value_bit;
5130 /* It shouldn't matter what's done here, so fill it with
5131 zero. */
5132 for (; i < elem_bitsize; i += value_bit)
5133 *vp++ = 0;
5135 else
5137 long tmp[max_bitsize / 32];
5138 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5140 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5141 gcc_assert (bitsize <= elem_bitsize);
5142 gcc_assert (bitsize % value_bit == 0);
5144 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5145 GET_MODE (el));
5147 /* real_to_target produces its result in words affected by
5148 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5149 and use WORDS_BIG_ENDIAN instead; see the documentation
5150 of SUBREG in rtl.texi. */
5151 for (i = 0; i < bitsize; i += value_bit)
5153 int ibase;
5154 if (WORDS_BIG_ENDIAN)
5155 ibase = bitsize - 1 - i;
5156 else
5157 ibase = i;
5158 *vp++ = tmp[ibase / 32] >> i % 32;
5161 /* It shouldn't matter what's done here, so fill it with
5162 zero. */
5163 for (; i < elem_bitsize; i += value_bit)
5164 *vp++ = 0;
5166 break;
5168 case CONST_FIXED:
5169 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5171 for (i = 0; i < elem_bitsize; i += value_bit)
5172 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5174 else
5176 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5177 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5178 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5179 i += value_bit)
5180 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5181 >> (i - HOST_BITS_PER_WIDE_INT);
5182 for (; i < elem_bitsize; i += value_bit)
5183 *vp++ = 0;
5185 break;
5187 default:
5188 gcc_unreachable ();
5192 /* Now, pick the right byte to start with. */
5193 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5194 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5195 will already have offset 0. */
5196 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5198 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5199 - byte);
5200 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5201 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5202 byte = (subword_byte % UNITS_PER_WORD
5203 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5206 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5207 so if it's become negative it will instead be very large.) */
5208 gcc_assert (byte < GET_MODE_SIZE (innermode));
5210 /* Convert from bytes to chunks of size value_bit. */
5211 value_start = byte * (BITS_PER_UNIT / value_bit);
5213 /* Re-pack the value. */
5215 if (VECTOR_MODE_P (outermode))
5217 num_elem = GET_MODE_NUNITS (outermode);
5218 result_v = rtvec_alloc (num_elem);
5219 elems = &RTVEC_ELT (result_v, 0);
5220 outer_submode = GET_MODE_INNER (outermode);
5222 else
5224 num_elem = 1;
5225 elems = &result_s;
5226 outer_submode = outermode;
5229 outer_class = GET_MODE_CLASS (outer_submode);
5230 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5232 gcc_assert (elem_bitsize % value_bit == 0);
5233 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5235 for (elem = 0; elem < num_elem; elem++)
5237 unsigned char *vp;
5239 /* Vectors are stored in target memory order. (This is probably
5240 a mistake.) */
5242 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5243 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5244 / BITS_PER_UNIT);
5245 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5246 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5247 unsigned bytele = (subword_byte % UNITS_PER_WORD
5248 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5249 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5252 switch (outer_class)
5254 case MODE_INT:
5255 case MODE_PARTIAL_INT:
5257 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5259 for (i = 0;
5260 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5261 i += value_bit)
5262 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5263 for (; i < elem_bitsize; i += value_bit)
5264 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5265 << (i - HOST_BITS_PER_WIDE_INT);
5267 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5268 know why. */
5269 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5270 elems[elem] = gen_int_mode (lo, outer_submode);
5271 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5272 elems[elem] = immed_double_const (lo, hi, outer_submode);
5273 else
5274 return NULL_RTX;
5276 break;
5278 case MODE_FLOAT:
5279 case MODE_DECIMAL_FLOAT:
5281 REAL_VALUE_TYPE r;
5282 long tmp[max_bitsize / 32];
5284 /* real_from_target wants its input in words affected by
5285 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5286 and use WORDS_BIG_ENDIAN instead; see the documentation
5287 of SUBREG in rtl.texi. */
5288 for (i = 0; i < max_bitsize / 32; i++)
5289 tmp[i] = 0;
5290 for (i = 0; i < elem_bitsize; i += value_bit)
5292 int ibase;
5293 if (WORDS_BIG_ENDIAN)
5294 ibase = elem_bitsize - 1 - i;
5295 else
5296 ibase = i;
5297 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5300 real_from_target (&r, tmp, outer_submode);
5301 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5303 break;
5305 case MODE_FRACT:
5306 case MODE_UFRACT:
5307 case MODE_ACCUM:
5308 case MODE_UACCUM:
5310 FIXED_VALUE_TYPE f;
5311 f.data.low = 0;
5312 f.data.high = 0;
5313 f.mode = outer_submode;
5315 for (i = 0;
5316 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5317 i += value_bit)
5318 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5319 for (; i < elem_bitsize; i += value_bit)
5320 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5321 << (i - HOST_BITS_PER_WIDE_INT));
5323 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5325 break;
5327 default:
5328 gcc_unreachable ();
5331 if (VECTOR_MODE_P (outermode))
5332 return gen_rtx_CONST_VECTOR (outermode, result_v);
5333 else
5334 return result_s;
5337 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5338 Return 0 if no simplifications are possible. */
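/* For example (with both offsets 0 for brevity), a nested
   (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses through the
   "changing mode twice" case below to a single (subreg:QI (reg:SI r) 0),
   and a SUBREG of a CONST_INT is handled by simplify_immed_subreg.  */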
5340 simplify_subreg (enum machine_mode outermode, rtx op,
5341 enum machine_mode innermode, unsigned int byte)
5343 /* Little bit of sanity checking. */
5344 gcc_assert (innermode != VOIDmode);
5345 gcc_assert (outermode != VOIDmode);
5346 gcc_assert (innermode != BLKmode);
5347 gcc_assert (outermode != BLKmode);
5349 gcc_assert (GET_MODE (op) == innermode
5350 || GET_MODE (op) == VOIDmode);
5352 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5353 gcc_assert (byte < GET_MODE_SIZE (innermode));
5355 if (outermode == innermode && !byte)
5356 return op;
5358 if (CONST_INT_P (op)
5359 || GET_CODE (op) == CONST_DOUBLE
5360 || GET_CODE (op) == CONST_FIXED
5361 || GET_CODE (op) == CONST_VECTOR)
5362 return simplify_immed_subreg (outermode, op, innermode, byte);
5364 /* Changing mode twice with SUBREG => just change it once,
5365 or not at all if changing back to op's starting mode. */
5366 if (GET_CODE (op) == SUBREG)
5368 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5369 int final_offset = byte + SUBREG_BYTE (op);
5370 rtx newx;
5372 if (outermode == innermostmode
5373 && byte == 0 && SUBREG_BYTE (op) == 0)
5374 return SUBREG_REG (op);
5376 /* The SUBREG_BYTE represents offset, as if the value were stored
5377 in memory. An irritating exception is the paradoxical subreg, where
5378 we define SUBREG_BYTE to be 0. On big endian machines, this
5379 value should be negative. For a moment, undo this exception. */
5380 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5382 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5383 if (WORDS_BIG_ENDIAN)
5384 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5385 if (BYTES_BIG_ENDIAN)
5386 final_offset += difference % UNITS_PER_WORD;
5388 if (SUBREG_BYTE (op) == 0
5389 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5391 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5392 if (WORDS_BIG_ENDIAN)
5393 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5394 if (BYTES_BIG_ENDIAN)
5395 final_offset += difference % UNITS_PER_WORD;
5398 /* See whether resulting subreg will be paradoxical. */
5399 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5401 /* In nonparadoxical subregs we can't handle negative offsets. */
5402 if (final_offset < 0)
5403 return NULL_RTX;
5404 /* Bail out in case resulting subreg would be incorrect. */
5405 if (final_offset % GET_MODE_SIZE (outermode)
5406 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5407 return NULL_RTX;
5409 else
5411 int offset = 0;
5412 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5414 /* In a paradoxical subreg, see if we are still looking at the lower part.
5415 If so, our SUBREG_BYTE will be 0. */
5416 if (WORDS_BIG_ENDIAN)
5417 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5418 if (BYTES_BIG_ENDIAN)
5419 offset += difference % UNITS_PER_WORD;
5420 if (offset == final_offset)
5421 final_offset = 0;
5422 else
5423 return NULL_RTX;
5426 /* Recurse for further possible simplifications. */
5427 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5428 final_offset);
5429 if (newx)
5430 return newx;
5431 if (validate_subreg (outermode, innermostmode,
5432 SUBREG_REG (op), final_offset))
5434 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5435 if (SUBREG_PROMOTED_VAR_P (op)
5436 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5437 && GET_MODE_CLASS (outermode) == MODE_INT
5438 && IN_RANGE (GET_MODE_SIZE (outermode),
5439 GET_MODE_SIZE (innermode),
5440 GET_MODE_SIZE (innermostmode))
5441 && subreg_lowpart_p (newx))
5443 SUBREG_PROMOTED_VAR_P (newx) = 1;
5444 SUBREG_PROMOTED_UNSIGNED_SET
5445 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5447 return newx;
5449 return NULL_RTX;
5452 /* Merge implicit and explicit truncations. */
5454 if (GET_CODE (op) == TRUNCATE
5455 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5456 && subreg_lowpart_offset (outermode, innermode) == byte)
5457 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5458 GET_MODE (XEXP (op, 0)));
5460 /* SUBREG of a hard register => just change the register number
5461 and/or mode. If the hard register is not valid in that mode,
5462 suppress this simplification. If the hard register is the stack,
5463 frame, or argument pointer, leave this as a SUBREG. */
5465 if (REG_P (op) && HARD_REGISTER_P (op))
5467 unsigned int regno, final_regno;
5469 regno = REGNO (op);
5470 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5471 if (HARD_REGISTER_NUM_P (final_regno))
5473 rtx x;
5474 int final_offset = byte;
5476 /* Adjust offset for paradoxical subregs. */
5477 if (byte == 0
5478 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5480 int difference = (GET_MODE_SIZE (innermode)
5481 - GET_MODE_SIZE (outermode));
5482 if (WORDS_BIG_ENDIAN)
5483 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5484 if (BYTES_BIG_ENDIAN)
5485 final_offset += difference % UNITS_PER_WORD;
5488 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5490 /* Propagate original regno. We don't have any way to specify
5491 the offset inside original regno, so do so only for lowpart.
5492 The information is used only by alias analysis, which cannot
5493 grok partial registers anyway. */
5495 if (subreg_lowpart_offset (outermode, innermode) == byte)
5496 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5497 return x;
5501 /* If we have a SUBREG of a register that we are replacing and we are
5502 replacing it with a MEM, make a new MEM and try replacing the
5503 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5504 or if we would be widening it. */
5506 if (MEM_P (op)
5507 && ! mode_dependent_address_p (XEXP (op, 0))
5508 /* Allow splitting of volatile memory references in case we don't
5509 have an instruction to move the whole thing. */
5510 && (! MEM_VOLATILE_P (op)
5511 || ! have_insn_for (SET, innermode))
5512 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5513 return adjust_address_nv (op, outermode, byte);
5515 /* Handle complex values represented as CONCAT
5516 of real and imaginary part. */
5517 if (GET_CODE (op) == CONCAT)
5519 unsigned int part_size, final_offset;
5520 rtx part, res;
5522 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5523 if (byte < part_size)
5525 part = XEXP (op, 0);
5526 final_offset = byte;
5528 else
5530 part = XEXP (op, 1);
5531 final_offset = byte - part_size;
5534 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5535 return NULL_RTX;
5537 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5538 if (res)
5539 return res;
5540 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5541 return gen_rtx_SUBREG (outermode, part, final_offset);
5542 return NULL_RTX;
5545 /* Optimize SUBREG truncations of zero and sign extended values. */
5546 if ((GET_CODE (op) == ZERO_EXTEND
5547 || GET_CODE (op) == SIGN_EXTEND)
5548 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5550 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5552 /* If we're requesting the lowpart of a zero or sign extension,
5553 there are three possibilities. If the outermode is the same
5554 as the origmode, we can omit both the extension and the subreg.
5555 If the outermode is not larger than the origmode, we can apply
5556 the truncation without the extension. Finally, if the outermode
5557 is larger than the origmode, but both are integer modes, we
5558 can just extend to the appropriate mode. */
5559 if (bitpos == 0)
5561 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5562 if (outermode == origmode)
5563 return XEXP (op, 0);
5564 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5565 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5566 subreg_lowpart_offset (outermode,
5567 origmode));
5568 if (SCALAR_INT_MODE_P (outermode))
5569 return simplify_gen_unary (GET_CODE (op), outermode,
5570 XEXP (op, 0), origmode);
5573 /* A SUBREG resulting from a zero extension may fold to zero if
5574 it extracts higher bits than the ZERO_EXTEND's source bits. */
5575 if (GET_CODE (op) == ZERO_EXTEND
5576 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5577 return CONST0_RTX (outermode);
5580 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5581 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5582 the outer subreg is effectively a truncation to the original mode. */
5583 if ((GET_CODE (op) == LSHIFTRT
5584 || GET_CODE (op) == ASHIFTRT)
5585 && SCALAR_INT_MODE_P (outermode)
5586 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5587 to avoid the possibility that an outer LSHIFTRT shifts by more
5588 than the sign extension's sign_bit_copies and introduces zeros
5589 into the high bits of the result. */
5590 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5591 && CONST_INT_P (XEXP (op, 1))
5592 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5593 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5594 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5595 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5596 return simplify_gen_binary (ASHIFTRT, outermode,
5597 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5599 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5600 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5601 the outer subreg is effectively a truncation to the original mode. */
5602 if ((GET_CODE (op) == LSHIFTRT
5603 || GET_CODE (op) == ASHIFTRT)
5604 && SCALAR_INT_MODE_P (outermode)
5605 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5606 && CONST_INT_P (XEXP (op, 1))
5607 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5608 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5609 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5610 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5611 return simplify_gen_binary (LSHIFTRT, outermode,
5612 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5614 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5615 (ashift:QI (x:QI) C), where C is a suitable small constant and
5616 the outer subreg is effectively a truncation to the original mode. */
5617 if (GET_CODE (op) == ASHIFT
5618 && SCALAR_INT_MODE_P (outermode)
5619 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5620 && CONST_INT_P (XEXP (op, 1))
5621 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5622 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5623 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5624 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5625 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5626 return simplify_gen_binary (ASHIFT, outermode,
5627 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5629 /* Recognize a word extraction from a multi-word subreg. */
5630 if ((GET_CODE (op) == LSHIFTRT
5631 || GET_CODE (op) == ASHIFTRT)
5632 && SCALAR_INT_MODE_P (outermode)
5633 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5634 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5635 && CONST_INT_P (XEXP (op, 1))
5636 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5637 && INTVAL (XEXP (op, 1)) >= 0
5638 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5639 && byte == subreg_lowpart_offset (outermode, innermode))
5641 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5642 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5643 (WORDS_BIG_ENDIAN
5644 ? byte - shifted_bytes
5645 : byte + shifted_bytes));
5648 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5649 and try replacing the SUBREG and shift with it. Don't do this if
5650 the MEM has a mode-dependent address or if we would be widening it. */
5652 if ((GET_CODE (op) == LSHIFTRT
5653 || GET_CODE (op) == ASHIFTRT)
5654 && MEM_P (XEXP (op, 0))
5655 && CONST_INT_P (XEXP (op, 1))
5656 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5657 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5658 && INTVAL (XEXP (op, 1)) > 0
5659 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5660 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5661 && ! MEM_VOLATILE_P (XEXP (op, 0))
5662 && byte == subreg_lowpart_offset (outermode, innermode)
5663 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5664 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5666 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5667 return adjust_address_nv (XEXP (op, 0), outermode,
5668 (WORDS_BIG_ENDIAN
5669 ? byte - shifted_bytes
5670 : byte + shifted_bytes));
5673 return NULL_RTX;
5676 /* Make a SUBREG operation or equivalent if it folds. */
5679 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5680 enum machine_mode innermode, unsigned int byte)
5682 rtx newx;
5684 newx = simplify_subreg (outermode, op, innermode, byte);
5685 if (newx)
5686 return newx;
5688 if (GET_CODE (op) == SUBREG
5689 || GET_CODE (op) == CONCAT
5690 || GET_MODE (op) == VOIDmode)
5691 return NULL_RTX;
5693 if (validate_subreg (outermode, innermode, op, byte))
5694 return gen_rtx_SUBREG (outermode, op, byte);
5696 return NULL_RTX;
5699 /* Simplify X, an rtx expression.
5701 Return the simplified expression or NULL if no simplifications
5702 were possible.
5704 This is the preferred entry point into the simplification routines;
5705 however, we still allow passes to call the more specific routines.
5707 Right now GCC has three (yes, three) major bodies of RTL simplification
5708 code that need to be unified.
5710 1. fold_rtx in cse.c. This code uses various CSE specific
5711 information to aid in RTL simplification.
5713 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5714 it uses combine specific information to aid in RTL
5715 simplification.
5717 3. The routines in this file.
5720 Long term we want to only have one body of simplification code; to
5721 get to that state I recommend the following steps:
5723 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5724 which are not pass dependent state into these routines.
5726 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5727 use this routine whenever possible.
5729 3. Allow for pass dependent state to be provided to these
5730 routines and add simplifications based on the pass dependent
5731 state. Remove code from cse.c & combine.c that becomes
5732 redundant/dead.
5734 It will take time, but ultimately the compiler will be easier to
5735 maintain and improve. It's totally silly that when we add a
5736 simplification it needs to be added to 4 places (3 for RTL
5737 simplification and 1 for tree simplification). */
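/* As a quick illustration of what this entry point catches: given
   (lo_sum (high FOO) FOO) it returns FOO via the RTX_OBJ case below,
   and a SUBREG is routed to simplify_subreg.  */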
5740 simplify_rtx (const_rtx x)
5742 const enum rtx_code code = GET_CODE (x);
5743 const enum machine_mode mode = GET_MODE (x);
5745 switch (GET_RTX_CLASS (code))
5747 case RTX_UNARY:
5748 return simplify_unary_operation (code, mode,
5749 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5750 case RTX_COMM_ARITH:
5751 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5752 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5754 /* Fall through.... */
5756 case RTX_BIN_ARITH:
5757 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5759 case RTX_TERNARY:
5760 case RTX_BITFIELD_OPS:
5761 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5762 XEXP (x, 0), XEXP (x, 1),
5763 XEXP (x, 2));
5765 case RTX_COMPARE:
5766 case RTX_COMM_COMPARE:
5767 return simplify_relational_operation (code, mode,
5768 ((GET_MODE (XEXP (x, 0))
5769 != VOIDmode)
5770 ? GET_MODE (XEXP (x, 0))
5771 : GET_MODE (XEXP (x, 1))),
5772 XEXP (x, 0),
5773 XEXP (x, 1));
5775 case RTX_EXTRA:
5776 if (code == SUBREG)
5777 return simplify_subreg (mode, SUBREG_REG (x),
5778 GET_MODE (SUBREG_REG (x)),
5779 SUBREG_BYTE (x));
5780 break;
5782 case RTX_OBJ:
5783 if (code == LO_SUM)
5785 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5786 if (GET_CODE (XEXP (x, 0)) == HIGH
5787 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5788 return XEXP (x, 1);
5790 break;
5792 default:
5793 break;
5795 return NULL;