1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
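/* Illustrative example (not part of the original source): with a 64-bit
   HOST_WIDE_INT, the pair for the value -5 is (low = -5, high =
   HWI_SIGN_EXTEND (-5) = -1), while the pair for 7 is (low = 7, high =
   HWI_SIGN_EXTEND (7) = 0).  */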
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
70 return gen_int_mode (- INTVAL (i), mode);
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
85 width = GET_MODE_BITSIZE (mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
99 else
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
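/* Example (illustrative, not from the original source): for 32-bit SImode
   on a host with a 64-bit HOST_WIDE_INT, the only rtx accepted is the
   CONST_INT whose low 32 bits are 0x80000000 (the SImode value INT_MIN);
   for a mode twice as wide as HOST_WIDE_INT the sign bit must sit in
   CONST_DOUBLE_HIGH with CONST_DOUBLE_LOW equal to zero.  */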
107 /* Make a binary operation by properly ordering the operands and
108 seeing if the expression folds. */
111 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
112 rtx op1)
114 rtx tem;
116 /* If this simplifies, do it. */
117 tem = simplify_binary_operation (code, mode, op0, op1);
118 if (tem)
119 return tem;
121 /* Put complex operands first and constants second if commutative. */
122 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
123 && swap_commutative_operands_p (op0, op1))
124 tem = op0, op0 = op1, op1 = tem;
126 return gen_rtx_fmt_ee (code, mode, op0, op1);
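/* Usage sketch (illustrative, not part of the original file; R is assumed
   to be some SImode REG rtx):

     rtx x = simplify_gen_binary (PLUS, SImode, GEN_INT (3), r);

   simplify_binary_operation cannot fold a register plus a constant, so the
   commutative canonicalization runs and the result is
   (plus:SI (reg) (const_int 3)) with the constant second.  */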
129 /* If X is a MEM referencing the constant pool, return the real value.
130 Otherwise return X. */
132 avoid_constant_pool_reference (rtx x)
134 rtx c, tmp, addr;
135 enum machine_mode cmode;
136 HOST_WIDE_INT offset = 0;
138 switch (GET_CODE (x))
140 case MEM:
141 break;
143 case FLOAT_EXTEND:
144 /* Handle float extensions of constant pool references. */
145 tmp = XEXP (x, 0);
146 c = avoid_constant_pool_reference (tmp);
147 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 REAL_VALUE_TYPE d;
151 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
152 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 return x;
156 default:
157 return x;
160 if (GET_MODE (x) == BLKmode)
161 return x;
163 addr = XEXP (x, 0);
165 /* Call target hook to avoid the effects of -fpic etc.... */
166 addr = targetm.delegitimize_address (addr);
168 /* Split the address into a base and integer offset. */
169 if (GET_CODE (addr) == CONST
170 && GET_CODE (XEXP (addr, 0)) == PLUS
171 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
174 addr = XEXP (XEXP (addr, 0), 0);
177 if (GET_CODE (addr) == LO_SUM)
178 addr = XEXP (addr, 1);
180 /* If this is a constant pool reference, we can turn it into its
181 constant and hope that simplifications happen. */
182 if (GET_CODE (addr) == SYMBOL_REF
183 && CONSTANT_POOL_ADDRESS_P (addr))
185 c = get_pool_constant (addr);
186 cmode = get_pool_mode (addr);
188 /* If we're accessing the constant in a different mode than it was
189 originally stored, attempt to fix that up via subreg simplifications.
190 If that fails we have no choice but to return the original memory. */
191 if (offset != 0 || cmode != GET_MODE (x))
193 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
194 if (tem && CONSTANT_P (tem))
195 return tem;
197 else
198 return c;
201 return x;
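/* Example (illustrative): if X is a MEM whose address is a SYMBOL_REF with
   CONSTANT_POOL_ADDRESS_P set and the pool entry holds a DFmode
   CONST_DOUBLE, that constant is returned directly; when X reads the entry
   at a nonzero offset or in a different mode, simplify_subreg is used to
   extract the piece, and the original MEM is returned if that fails.  */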
204 /* Simplify a MEM based on its attributes. This is the default
205 delegitimize_address target hook, and it's recommended that every
206 overrider call it. */
209 delegitimize_mem_from_attrs (rtx x)
211 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
212 use their base addresses as equivalent. */
213 if (MEM_P (x)
214 && MEM_EXPR (x)
215 && MEM_OFFSET (x))
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
221 switch (TREE_CODE (decl))
223 default:
224 decl = NULL;
225 break;
227 case VAR_DECL:
228 break;
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
254 break;
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
266 rtx newx;
268 offset += INTVAL (MEM_OFFSET (x));
270 newx = DECL_RTL (decl);
272 if (MEM_P (newx))
274 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276 /* Avoid creating a new MEM needlessly if we already had
277 the same address. We do if there's no OFFSET and the
278 old address X is identical to NEWX, or if X is of the
279 form (plus NEWX OFFSET), or the NEWX is of the form
280 (plus Y (const_int Z)) and X is that with the offset
281 added: (plus Y (const_int Z+OFFSET)). */
282 if (!((offset == 0
283 || (GET_CODE (o) == PLUS
284 && GET_CODE (XEXP (o, 1)) == CONST_INT
285 && (offset == INTVAL (XEXP (o, 1))
286 || (GET_CODE (n) == PLUS
287 && GET_CODE (XEXP (n, 1)) == CONST_INT
288 && (INTVAL (XEXP (n, 1)) + offset
289 == INTVAL (XEXP (o, 1)))
290 && (n = XEXP (n, 0))))
291 && (o = XEXP (o, 0))))
292 && rtx_equal_p (o, n)))
293 x = adjust_address_nv (newx, mode, offset);
295 else if (GET_MODE (x) == GET_MODE (newx)
296 && offset == 0)
297 x = newx;
301 return x;
304 /* Make a unary operation by first seeing if it folds and otherwise making
305 the specified operation. */
308 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
309 enum machine_mode op_mode)
311 rtx tem;
313 /* If this simplifies, use it. */
314 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
315 return tem;
317 return gen_rtx_fmt_e (code, mode, op);
320 /* Likewise for ternary operations. */
323 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
324 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 rtx tem;
328 /* If this simplifies, use it. */
329 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
330 op0, op1, op2)))
331 return tem;
333 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
336 /* Likewise, for relational operations.
337 CMP_MODE specifies mode comparison is done in. */
340 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
341 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 rtx tem;
345 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
346 op0, op1)))
347 return tem;
349 return gen_rtx_fmt_ee (code, mode, op0, op1);
352 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
353 and simplify the result. If FN is non-NULL, call this callback on each
354 X, if it returns non-NULL, replace X with its return value and simplify the
355 result. */
358 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
359 rtx (*fn) (rtx, const_rtx, void *), void *data)
361 enum rtx_code code = GET_CODE (x);
362 enum machine_mode mode = GET_MODE (x);
363 enum machine_mode op_mode;
364 const char *fmt;
365 rtx op0, op1, op2, newx, op;
366 rtvec vec, newvec;
367 int i, j;
369 if (__builtin_expect (fn != NULL, 0))
371 newx = fn (x, old_rtx, data);
372 if (newx)
373 return newx;
375 else if (rtx_equal_p (x, old_rtx))
376 return copy_rtx ((rtx) data);
378 switch (GET_RTX_CLASS (code))
380 case RTX_UNARY:
381 op0 = XEXP (x, 0);
382 op_mode = GET_MODE (op0);
383 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
384 if (op0 == XEXP (x, 0))
385 return x;
386 return simplify_gen_unary (code, mode, op0, op_mode);
388 case RTX_BIN_ARITH:
389 case RTX_COMM_ARITH:
390 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
391 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
392 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
393 return x;
394 return simplify_gen_binary (code, mode, op0, op1);
396 case RTX_COMPARE:
397 case RTX_COMM_COMPARE:
398 op0 = XEXP (x, 0);
399 op1 = XEXP (x, 1);
400 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
401 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
402 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
403 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
404 return x;
405 return simplify_gen_relational (code, mode, op_mode, op0, op1);
407 case RTX_TERNARY:
408 case RTX_BITFIELD_OPS:
409 op0 = XEXP (x, 0);
410 op_mode = GET_MODE (op0);
411 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
412 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
413 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
414 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
415 return x;
416 if (op_mode == VOIDmode)
417 op_mode = GET_MODE (op0);
418 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
420 case RTX_EXTRA:
421 if (code == SUBREG)
423 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
424 if (op0 == SUBREG_REG (x))
425 return x;
426 op0 = simplify_gen_subreg (GET_MODE (x), op0,
427 GET_MODE (SUBREG_REG (x)),
428 SUBREG_BYTE (x));
429 return op0 ? op0 : x;
431 break;
433 case RTX_OBJ:
434 if (code == MEM)
436 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
437 if (op0 == XEXP (x, 0))
438 return x;
439 return replace_equiv_address_nv (x, op0);
441 else if (code == LO_SUM)
443 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
444 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 /* (lo_sum (high x) x) -> x */
447 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
448 return op1;
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return gen_rtx_LO_SUM (mode, op0, op1);
454 break;
456 default:
457 break;
460 newx = x;
461 fmt = GET_RTX_FORMAT (code);
462 for (i = 0; fmt[i]; i++)
463 switch (fmt[i])
465 case 'E':
466 vec = XVEC (x, i);
467 newvec = XVEC (newx, i);
468 for (j = 0; j < GET_NUM_ELEM (vec); j++)
470 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
471 old_rtx, fn, data);
472 if (op != RTVEC_ELT (vec, j))
474 if (newvec == vec)
476 newvec = shallow_copy_rtvec (vec);
477 if (x == newx)
478 newx = shallow_copy_rtx (x);
479 XVEC (newx, i) = newvec;
481 RTVEC_ELT (newvec, j) = op;
484 break;
486 case 'e':
487 if (XEXP (x, i))
489 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
490 if (op != XEXP (x, i))
492 if (x == newx)
493 newx = shallow_copy_rtx (x);
494 XEXP (newx, i) = op;
497 break;
499 return newx;
502 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
503 resulting RTX. Return a new RTX which is as simplified as possible. */
506 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
508 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
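/* Usage sketch (illustrative, not part of the original file; X is assumed
   to be (plus:SI (reg 100) (reg 101)) and R100 the rtx for (reg 100)):

     rtx res = simplify_replace_rtx (x, r100, const0_rtx);

   The PLUS is rebuilt with the substituted operand and then simplified,
   so RES is just (reg 101).  */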
511 /* Try to simplify a unary operation CODE whose output mode is to be
512 MODE with input operand OP whose mode was originally OP_MODE.
513 Return zero if no simplification can be made. */
515 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
516 rtx op, enum machine_mode op_mode)
518 rtx trueop, tem;
520 trueop = avoid_constant_pool_reference (op);
522 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
523 if (tem)
524 return tem;
526 return simplify_unary_operation_1 (code, mode, op);
529 /* Perform some simplifications we can do even if the operands
530 aren't constant. */
531 static rtx
532 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
534 enum rtx_code reversed;
535 rtx temp;
537 switch (code)
539 case NOT:
540 /* (not (not X)) == X. */
541 if (GET_CODE (op) == NOT)
542 return XEXP (op, 0);
544 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
545 comparison is all ones. */
546 if (COMPARISON_P (op)
547 && (mode == BImode || STORE_FLAG_VALUE == -1)
548 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
549 return simplify_gen_relational (reversed, mode, VOIDmode,
550 XEXP (op, 0), XEXP (op, 1));
552 /* (not (plus X -1)) can become (neg X). */
553 if (GET_CODE (op) == PLUS
554 && XEXP (op, 1) == constm1_rtx)
555 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 /* Similarly, (not (neg X)) is (plus X -1). */
558 if (GET_CODE (op) == NEG)
559 return plus_constant (XEXP (op, 0), -1);
561 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
562 if (GET_CODE (op) == XOR
563 && CONST_INT_P (XEXP (op, 1))
564 && (temp = simplify_unary_operation (NOT, mode,
565 XEXP (op, 1), mode)) != 0)
566 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
568 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
569 if (GET_CODE (op) == PLUS
570 && CONST_INT_P (XEXP (op, 1))
571 && mode_signbit_p (mode, XEXP (op, 1))
572 && (temp = simplify_unary_operation (NOT, mode,
573 XEXP (op, 1), mode)) != 0)
574 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
577 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
578 operands other than 1, but that is not valid. We could do a
579 similar simplification for (not (lshiftrt C X)) where C is
580 just the sign bit, but this doesn't seem common enough to
581 bother with. */
582 if (GET_CODE (op) == ASHIFT
583 && XEXP (op, 0) == const1_rtx)
585 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
586 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
589 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
590 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
591 so we can perform the above simplification. */
593 if (STORE_FLAG_VALUE == -1
594 && GET_CODE (op) == ASHIFTRT
 595 && CONST_INT_P (XEXP (op, 1))
596 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
597 return simplify_gen_relational (GE, mode, VOIDmode,
598 XEXP (op, 0), const0_rtx);
601 if (GET_CODE (op) == SUBREG
602 && subreg_lowpart_p (op)
603 && (GET_MODE_SIZE (GET_MODE (op))
604 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
605 && GET_CODE (SUBREG_REG (op)) == ASHIFT
606 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
608 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
609 rtx x;
611 x = gen_rtx_ROTATE (inner_mode,
612 simplify_gen_unary (NOT, inner_mode, const1_rtx,
613 inner_mode),
614 XEXP (SUBREG_REG (op), 1));
615 return rtl_hooks.gen_lowpart_no_emit (mode, x);
618 /* Apply De Morgan's laws to reduce number of patterns for machines
619 with negating logical insns (and-not, nand, etc.). If result has
620 only one NOT, put it first, since that is how the patterns are
621 coded. */
623 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
625 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
626 enum machine_mode op_mode;
628 op_mode = GET_MODE (in1);
629 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
631 op_mode = GET_MODE (in2);
632 if (op_mode == VOIDmode)
633 op_mode = mode;
634 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
636 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
638 rtx tem = in2;
639 in2 = in1; in1 = tem;
642 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
643 mode, in1, in2);
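/* Worked examples (illustrative): (not (ior A B)) becomes
   (and (not A) (not B)), and (not (and A B)) becomes (ior (not A) (not B)).
   If an inner NOT cancels, e.g. (not (ior (not A) B)), the single remaining
   NOT is placed first, giving (and (not B) A).  */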
645 break;
647 case NEG:
648 /* (neg (neg X)) == X. */
649 if (GET_CODE (op) == NEG)
650 return XEXP (op, 0);
652 /* (neg (plus X 1)) can become (not X). */
653 if (GET_CODE (op) == PLUS
654 && XEXP (op, 1) == const1_rtx)
655 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
657 /* Similarly, (neg (not X)) is (plus X 1). */
658 if (GET_CODE (op) == NOT)
659 return plus_constant (XEXP (op, 0), 1);
661 /* (neg (minus X Y)) can become (minus Y X). This transformation
662 isn't safe for modes with signed zeros, since if X and Y are
663 both +0, (minus Y X) is the same as (minus X Y). If the
664 rounding mode is towards +infinity (or -infinity) then the two
665 expressions will be rounded differently. */
666 if (GET_CODE (op) == MINUS
667 && !HONOR_SIGNED_ZEROS (mode)
668 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
669 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
671 if (GET_CODE (op) == PLUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
675 /* (neg (plus A C)) is simplified to (minus -C A). */
676 if (CONST_INT_P (XEXP (op, 1))
677 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
679 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
680 if (temp)
681 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
684 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
685 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
689 /* (neg (mult A B)) becomes (mult (neg A) B).
690 This works even for floating-point values. */
691 if (GET_CODE (op) == MULT
692 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
694 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
695 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
698 /* NEG commutes with ASHIFT since it is multiplication. Only do
699 this if we can then eliminate the NEG (e.g., if the operand
700 is a constant). */
701 if (GET_CODE (op) == ASHIFT)
703 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
704 if (temp)
705 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
708 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
709 C is equal to the width of MODE minus 1. */
710 if (GET_CODE (op) == ASHIFTRT
711 && CONST_INT_P (XEXP (op, 1))
712 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (op, 0), XEXP (op, 1));
716 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
717 C is equal to the width of MODE minus 1. */
718 if (GET_CODE (op) == LSHIFTRT
719 && CONST_INT_P (XEXP (op, 1))
720 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
721 return simplify_gen_binary (ASHIFTRT, mode,
722 XEXP (op, 0), XEXP (op, 1));
724 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
725 if (GET_CODE (op) == XOR
726 && XEXP (op, 1) == const1_rtx
727 && nonzero_bits (XEXP (op, 0), mode) == 1)
728 return plus_constant (XEXP (op, 0), -1);
730 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
731 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
732 if (GET_CODE (op) == LT
733 && XEXP (op, 1) == const0_rtx
734 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
736 enum machine_mode inner = GET_MODE (XEXP (op, 0));
737 int isize = GET_MODE_BITSIZE (inner);
738 if (STORE_FLAG_VALUE == 1)
740 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
741 GEN_INT (isize - 1));
742 if (mode == inner)
743 return temp;
744 if (GET_MODE_BITSIZE (mode) > isize)
745 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
746 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
748 else if (STORE_FLAG_VALUE == -1)
750 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
751 GEN_INT (isize - 1));
752 if (mode == inner)
753 return temp;
754 if (GET_MODE_BITSIZE (mode) > isize)
755 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
756 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
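/* Worked example (illustrative): with STORE_FLAG_VALUE == 1 and X in SImode,
   (neg:SI (lt:SI X (const_int 0))) becomes (ashiftrt:SI X (const_int 31)),
   i.e. -1 when X is negative and 0 otherwise.  With STORE_FLAG_VALUE == -1,
   (lt X (const_int 0)) is -1 or 0, so its negation is 1 or 0, which
   (lshiftrt:SI X (const_int 31)) computes directly.  */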
759 break;
761 case TRUNCATE:
762 /* We can't handle truncation to a partial integer mode here
763 because we don't know the real bitsize of the partial
764 integer mode. */
765 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
766 break;
768 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
769 if ((GET_CODE (op) == SIGN_EXTEND
770 || GET_CODE (op) == ZERO_EXTEND)
771 && GET_MODE (XEXP (op, 0)) == mode)
772 return XEXP (op, 0);
774 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
775 (OP:SI foo:SI) if OP is NEG or ABS. */
776 if ((GET_CODE (op) == ABS
777 || GET_CODE (op) == NEG)
778 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
779 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
780 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
781 return simplify_gen_unary (GET_CODE (op), mode,
782 XEXP (XEXP (op, 0), 0), mode);
784 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 (truncate:A X). */
786 if (GET_CODE (op) == SUBREG
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
789 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
790 GET_MODE (XEXP (SUBREG_REG (op), 0)));
792 /* If we know that the value is already truncated, we can
793 replace the TRUNCATE with a SUBREG. Note that this is also
794 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 795 modes; we just have to apply a different definition for
796 truncation. But don't do this for an (LSHIFTRT (MULT ...))
797 since this will cause problems with the umulXi3_highpart
798 patterns. */
799 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
800 GET_MODE_BITSIZE (GET_MODE (op)))
801 ? (num_sign_bit_copies (op, GET_MODE (op))
802 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
803 - GET_MODE_BITSIZE (mode)))
804 : truncated_to_mode (mode, op))
805 && ! (GET_CODE (op) == LSHIFTRT
806 && GET_CODE (XEXP (op, 0)) == MULT))
807 return rtl_hooks.gen_lowpart_no_emit (mode, op);
809 /* A truncate of a comparison can be replaced with a subreg if
810 STORE_FLAG_VALUE permits. This is like the previous test,
811 but it works even if the comparison is done in a mode larger
812 than HOST_BITS_PER_WIDE_INT. */
813 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
814 && COMPARISON_P (op)
815 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
816 return rtl_hooks.gen_lowpart_no_emit (mode, op);
817 break;
819 case FLOAT_TRUNCATE:
820 if (DECIMAL_FLOAT_MODE_P (mode))
821 break;
823 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
824 if (GET_CODE (op) == FLOAT_EXTEND
825 && GET_MODE (XEXP (op, 0)) == mode)
826 return XEXP (op, 0);
828 /* (float_truncate:SF (float_truncate:DF foo:XF))
829 = (float_truncate:SF foo:XF).
830 This may eliminate double rounding, so it is unsafe.
832 (float_truncate:SF (float_extend:XF foo:DF))
833 = (float_truncate:SF foo:DF).
835 (float_truncate:DF (float_extend:XF foo:SF))
 836 = (float_extend:DF foo:SF). */
837 if ((GET_CODE (op) == FLOAT_TRUNCATE
838 && flag_unsafe_math_optimizations)
839 || GET_CODE (op) == FLOAT_EXTEND)
840 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
841 0)))
842 > GET_MODE_SIZE (mode)
843 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
844 mode,
845 XEXP (op, 0), mode);
847 /* (float_truncate (float x)) is (float x) */
848 if (GET_CODE (op) == FLOAT
849 && (flag_unsafe_math_optimizations
850 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
851 && ((unsigned)significand_size (GET_MODE (op))
852 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
853 - num_sign_bit_copies (XEXP (op, 0),
854 GET_MODE (XEXP (op, 0))))))))
855 return simplify_gen_unary (FLOAT, mode,
856 XEXP (op, 0),
857 GET_MODE (XEXP (op, 0)));
859 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
860 (OP:SF foo:SF) if OP is NEG or ABS. */
861 if ((GET_CODE (op) == ABS
862 || GET_CODE (op) == NEG)
863 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
864 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
865 return simplify_gen_unary (GET_CODE (op), mode,
866 XEXP (XEXP (op, 0), 0), mode);
868 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
869 is (float_truncate:SF x). */
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
873 return SUBREG_REG (op);
874 break;
876 case FLOAT_EXTEND:
877 if (DECIMAL_FLOAT_MODE_P (mode))
878 break;
880 /* (float_extend (float_extend x)) is (float_extend x)
882 (float_extend (float x)) is (float x) assuming that double
 883 rounding can't happen.  */
885 if (GET_CODE (op) == FLOAT_EXTEND
886 || (GET_CODE (op) == FLOAT
887 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
888 && ((unsigned)significand_size (GET_MODE (op))
889 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
890 - num_sign_bit_copies (XEXP (op, 0),
891 GET_MODE (XEXP (op, 0)))))))
892 return simplify_gen_unary (GET_CODE (op), mode,
893 XEXP (op, 0),
894 GET_MODE (XEXP (op, 0)));
896 break;
898 case ABS:
899 /* (abs (neg <foo>)) -> (abs <foo>) */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
902 GET_MODE (XEXP (op, 0)));
904 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
905 do nothing. */
906 if (GET_MODE (op) == VOIDmode)
907 break;
909 /* If operand is something known to be positive, ignore the ABS. */
910 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
911 || ((GET_MODE_BITSIZE (GET_MODE (op))
912 <= HOST_BITS_PER_WIDE_INT)
913 && ((nonzero_bits (op, GET_MODE (op))
914 & ((unsigned HOST_WIDE_INT) 1
915 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
916 == 0)))
917 return op;
919 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
920 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
921 return gen_rtx_NEG (mode, op);
923 break;
925 case FFS:
926 /* (ffs (*_extend <X>)) = (ffs <X>) */
927 if (GET_CODE (op) == SIGN_EXTEND
928 || GET_CODE (op) == ZERO_EXTEND)
929 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
930 GET_MODE (XEXP (op, 0)));
931 break;
933 case POPCOUNT:
934 switch (GET_CODE (op))
936 case BSWAP:
937 case ZERO_EXTEND:
938 /* (popcount (zero_extend <X>)) = (popcount <X>) */
939 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
940 GET_MODE (XEXP (op, 0)));
942 case ROTATE:
943 case ROTATERT:
944 /* Rotations don't affect popcount. */
945 if (!side_effects_p (XEXP (op, 1)))
946 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
947 GET_MODE (XEXP (op, 0)));
948 break;
950 default:
951 break;
953 break;
955 case PARITY:
956 switch (GET_CODE (op))
958 case NOT:
959 case BSWAP:
960 case ZERO_EXTEND:
961 case SIGN_EXTEND:
962 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
963 GET_MODE (XEXP (op, 0)));
965 case ROTATE:
966 case ROTATERT:
967 /* Rotations don't affect parity. */
968 if (!side_effects_p (XEXP (op, 1)))
969 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
970 GET_MODE (XEXP (op, 0)));
971 break;
973 default:
974 break;
976 break;
978 case BSWAP:
979 /* (bswap (bswap x)) -> x. */
980 if (GET_CODE (op) == BSWAP)
981 return XEXP (op, 0);
982 break;
984 case FLOAT:
985 /* (float (sign_extend <X>)) = (float <X>). */
986 if (GET_CODE (op) == SIGN_EXTEND)
987 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
988 GET_MODE (XEXP (op, 0)));
989 break;
991 case SIGN_EXTEND:
992 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
993 becomes just the MINUS if its mode is MODE. This allows
994 folding switch statements on machines using casesi (such as
995 the VAX). */
996 if (GET_CODE (op) == TRUNCATE
997 && GET_MODE (XEXP (op, 0)) == mode
998 && GET_CODE (XEXP (op, 0)) == MINUS
999 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1001 return XEXP (op, 0);
1003 /* Check for a sign extension of a subreg of a promoted
1004 variable, where the promotion is sign-extended, and the
1005 target mode is the same as the variable's promotion. */
1006 if (GET_CODE (op) == SUBREG
1007 && SUBREG_PROMOTED_VAR_P (op)
1008 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1009 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1010 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1012 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1013 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1014 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1016 gcc_assert (GET_MODE_BITSIZE (mode)
1017 > GET_MODE_BITSIZE (GET_MODE (op)));
1018 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1019 GET_MODE (XEXP (op, 0)));
1022 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1023 is (sign_extend:M (subreg:O <X>)) if there is mode with
1024 GET_MODE_BITSIZE (N) - I bits.
1025 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1026 is similarly (zero_extend:M (subreg:O <X>)). */
1027 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1028 && GET_CODE (XEXP (op, 0)) == ASHIFT
1029 && CONST_INT_P (XEXP (op, 1))
1030 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1031 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1033 enum machine_mode tmode
1034 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1035 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1036 gcc_assert (GET_MODE_BITSIZE (mode)
1037 > GET_MODE_BITSIZE (GET_MODE (op)));
1038 if (tmode != BLKmode)
1040 rtx inner =
1041 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1042 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1043 ? SIGN_EXTEND : ZERO_EXTEND,
1044 mode, inner, tmode);
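/* Worked example (illustrative): in
   (sign_extend:DI (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)))
   the shift pair leaves 32 - 24 = 8 significant bits, so TMODE is QImode and
   the result is (sign_extend:DI <QImode lowpart of X>); with LSHIFTRT in
   place of ASHIFTRT the outer operation becomes ZERO_EXTEND instead.  */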
1048 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1049 /* As we do not know which address space the pointer is referring to,
1050 we can do this only if the target does not support different pointer
1051 or address modes depending on the address space. */
1052 if (target_default_pointer_address_modes_p ()
1053 && ! POINTERS_EXTEND_UNSIGNED
1054 && mode == Pmode && GET_MODE (op) == ptr_mode
1055 && (CONSTANT_P (op)
1056 || (GET_CODE (op) == SUBREG
1057 && REG_P (SUBREG_REG (op))
1058 && REG_POINTER (SUBREG_REG (op))
1059 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1060 return convert_memory_address (Pmode, op);
1061 #endif
1062 break;
1064 case ZERO_EXTEND:
1065 /* Check for a zero extension of a subreg of a promoted
1066 variable, where the promotion is zero-extended, and the
1067 target mode is the same as the variable's promotion. */
1068 if (GET_CODE (op) == SUBREG
1069 && SUBREG_PROMOTED_VAR_P (op)
1070 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1071 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1072 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1075 if (GET_CODE (op) == ZERO_EXTEND)
1076 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1077 GET_MODE (XEXP (op, 0)));
1079 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1080 is (zero_extend:M (subreg:O <X>)) if there is mode with
1081 GET_MODE_BITSIZE (N) - I bits. */
1082 if (GET_CODE (op) == LSHIFTRT
1083 && GET_CODE (XEXP (op, 0)) == ASHIFT
1084 && CONST_INT_P (XEXP (op, 1))
1085 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1086 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1088 enum machine_mode tmode
1089 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1090 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1091 if (tmode != BLKmode)
1093 rtx inner =
1094 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1095 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1099 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
 1100 /* As we do not know which address space the pointer is referring to,
1101 we can do this only if the target does not support different pointer
1102 or address modes depending on the address space. */
1103 if (target_default_pointer_address_modes_p ()
1104 && POINTERS_EXTEND_UNSIGNED > 0
1105 && mode == Pmode && GET_MODE (op) == ptr_mode
1106 && (CONSTANT_P (op)
1107 || (GET_CODE (op) == SUBREG
1108 && REG_P (SUBREG_REG (op))
1109 && REG_POINTER (SUBREG_REG (op))
1110 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1111 return convert_memory_address (Pmode, op);
1112 #endif
1113 break;
1115 default:
1116 break;
1119 return 0;
1122 /* Try to compute the value of a unary operation CODE whose output mode is to
1123 be MODE with input operand OP whose mode was originally OP_MODE.
1124 Return zero if the value cannot be computed. */
1126 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1127 rtx op, enum machine_mode op_mode)
1129 unsigned int width = GET_MODE_BITSIZE (mode);
1131 if (code == VEC_DUPLICATE)
1133 gcc_assert (VECTOR_MODE_P (mode));
1134 if (GET_MODE (op) != VOIDmode)
1136 if (!VECTOR_MODE_P (GET_MODE (op)))
1137 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1138 else
1139 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1140 (GET_MODE (op)));
1142 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1143 || GET_CODE (op) == CONST_VECTOR)
1145 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1146 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1147 rtvec v = rtvec_alloc (n_elts);
1148 unsigned int i;
1150 if (GET_CODE (op) != CONST_VECTOR)
1151 for (i = 0; i < n_elts; i++)
1152 RTVEC_ELT (v, i) = op;
1153 else
1155 enum machine_mode inmode = GET_MODE (op);
1156 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1157 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1159 gcc_assert (in_n_elts < n_elts);
1160 gcc_assert ((n_elts % in_n_elts) == 0);
1161 for (i = 0; i < n_elts; i++)
1162 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1164 return gen_rtx_CONST_VECTOR (mode, v);
1168 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1170 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1171 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1172 enum machine_mode opmode = GET_MODE (op);
1173 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1174 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1175 rtvec v = rtvec_alloc (n_elts);
1176 unsigned int i;
1178 gcc_assert (op_n_elts == n_elts);
1179 for (i = 0; i < n_elts; i++)
1181 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1182 CONST_VECTOR_ELT (op, i),
1183 GET_MODE_INNER (opmode));
1184 if (!x)
1185 return 0;
1186 RTVEC_ELT (v, i) = x;
1188 return gen_rtx_CONST_VECTOR (mode, v);
1191 /* The order of these tests is critical so that, for example, we don't
1192 check the wrong mode (input vs. output) for a conversion operation,
1193 such as FIX. At some point, this should be simplified. */
1195 if (code == FLOAT && GET_MODE (op) == VOIDmode
1196 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1198 HOST_WIDE_INT hv, lv;
1199 REAL_VALUE_TYPE d;
1201 if (CONST_INT_P (op))
1202 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1203 else
1204 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1206 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1207 d = real_value_truncate (mode, d);
1208 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1210 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1211 && (GET_CODE (op) == CONST_DOUBLE
1212 || CONST_INT_P (op)))
1214 HOST_WIDE_INT hv, lv;
1215 REAL_VALUE_TYPE d;
1217 if (CONST_INT_P (op))
1218 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1219 else
1220 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1222 if (op_mode == VOIDmode)
1224 /* We don't know how to interpret negative-looking numbers in
1225 this case, so don't try to fold those. */
1226 if (hv < 0)
1227 return 0;
1229 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1231 else
1232 hv = 0, lv &= GET_MODE_MASK (op_mode);
1234 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1235 d = real_value_truncate (mode, d);
1236 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1239 if (CONST_INT_P (op)
1240 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1242 HOST_WIDE_INT arg0 = INTVAL (op);
1243 HOST_WIDE_INT val;
1245 switch (code)
1247 case NOT:
1248 val = ~ arg0;
1249 break;
1251 case NEG:
1252 val = - arg0;
1253 break;
1255 case ABS:
1256 val = (arg0 >= 0 ? arg0 : - arg0);
1257 break;
1259 case FFS:
1260 arg0 &= GET_MODE_MASK (mode);
1261 val = ffs_hwi (arg0);
1262 break;
1264 case CLZ:
1265 arg0 &= GET_MODE_MASK (mode);
1266 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1268 else
1269 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1270 break;
1272 case CTZ:
1273 arg0 &= GET_MODE_MASK (mode);
1274 if (arg0 == 0)
1276 /* Even if the value at zero is undefined, we have to come
1277 up with some replacement. Seems good enough. */
1278 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1279 val = GET_MODE_BITSIZE (mode);
1281 else
1282 val = ctz_hwi (arg0);
1283 break;
1285 case POPCOUNT:
1286 arg0 &= GET_MODE_MASK (mode);
1287 val = 0;
1288 while (arg0)
1289 val++, arg0 &= arg0 - 1;
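/* The loop above relies on the classic trick that arg0 &= arg0 - 1 clears
   the lowest set bit on each iteration; e.g. (illustrative) arg0 = 12
   (binary 1100) goes to 8 and then to 0, so val ends up as 2.  */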
1290 break;
1292 case PARITY:
1293 arg0 &= GET_MODE_MASK (mode);
1294 val = 0;
1295 while (arg0)
1296 val++, arg0 &= arg0 - 1;
1297 val &= 1;
1298 break;
1300 case BSWAP:
1302 unsigned int s;
1304 val = 0;
1305 for (s = 0; s < width; s += 8)
1307 unsigned int d = width - s - 8;
1308 unsigned HOST_WIDE_INT byte;
1309 byte = (arg0 >> s) & 0xff;
1310 val |= byte << d;
1313 break;
1315 case TRUNCATE:
1316 val = arg0;
1317 break;
1319 case ZERO_EXTEND:
1320 /* When zero-extending a CONST_INT, we need to know its
1321 original mode. */
1322 gcc_assert (op_mode != VOIDmode);
1323 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1325 /* If we were really extending the mode,
1326 we would have to distinguish between zero-extension
1327 and sign-extension. */
1328 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1329 val = arg0;
1331 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1332 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1333 << GET_MODE_BITSIZE (op_mode));
1334 else
1335 return 0;
1336 break;
1338 case SIGN_EXTEND:
1339 if (op_mode == VOIDmode)
1340 op_mode = mode;
1341 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1343 /* If we were really extending the mode,
1344 we would have to distinguish between zero-extension
1345 and sign-extension. */
1346 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1347 val = arg0;
1349 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 1352 val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
1353 << GET_MODE_BITSIZE (op_mode));
1354 if (val & ((unsigned HOST_WIDE_INT) 1
1355 << (GET_MODE_BITSIZE (op_mode) - 1)))
 1357 val -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1359 else
1360 return 0;
1361 break;
1363 case SQRT:
1364 case FLOAT_EXTEND:
1365 case FLOAT_TRUNCATE:
1366 case SS_TRUNCATE:
1367 case US_TRUNCATE:
1368 case SS_NEG:
1369 case US_NEG:
1370 case SS_ABS:
1371 return 0;
1373 default:
1374 gcc_unreachable ();
1377 return gen_int_mode (val, mode);
1380 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1381 for a DImode operation on a CONST_INT. */
1382 else if (GET_MODE (op) == VOIDmode
1383 && width <= HOST_BITS_PER_WIDE_INT * 2
1384 && (GET_CODE (op) == CONST_DOUBLE
1385 || CONST_INT_P (op)))
1387 unsigned HOST_WIDE_INT l1, lv;
1388 HOST_WIDE_INT h1, hv;
1390 if (GET_CODE (op) == CONST_DOUBLE)
1391 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1392 else
1393 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1395 switch (code)
1397 case NOT:
1398 lv = ~ l1;
1399 hv = ~ h1;
1400 break;
1402 case NEG:
1403 neg_double (l1, h1, &lv, &hv);
1404 break;
1406 case ABS:
1407 if (h1 < 0)
1408 neg_double (l1, h1, &lv, &hv);
1409 else
1410 lv = l1, hv = h1;
1411 break;
1413 case FFS:
1414 hv = 0;
1415 if (l1 != 0)
1416 lv = ffs_hwi (l1);
1417 else if (h1 != 0)
1418 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1419 else
1420 lv = 0;
1421 break;
1423 case CLZ:
1424 hv = 0;
1425 if (h1 != 0)
1426 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1427 - HOST_BITS_PER_WIDE_INT;
1428 else if (l1 != 0)
1429 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1430 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1431 lv = GET_MODE_BITSIZE (mode);
1432 break;
1434 case CTZ:
1435 hv = 0;
1436 if (l1 != 0)
1437 lv = ctz_hwi (l1);
1438 else if (h1 != 0)
1439 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1440 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1441 lv = GET_MODE_BITSIZE (mode);
1442 break;
1444 case POPCOUNT:
1445 hv = 0;
1446 lv = 0;
1447 while (l1)
1448 lv++, l1 &= l1 - 1;
1449 while (h1)
1450 lv++, h1 &= h1 - 1;
1451 break;
1453 case PARITY:
1454 hv = 0;
1455 lv = 0;
1456 while (l1)
1457 lv++, l1 &= l1 - 1;
1458 while (h1)
1459 lv++, h1 &= h1 - 1;
1460 lv &= 1;
1461 break;
1463 case BSWAP:
1465 unsigned int s;
1467 hv = 0;
1468 lv = 0;
1469 for (s = 0; s < width; s += 8)
1471 unsigned int d = width - s - 8;
1472 unsigned HOST_WIDE_INT byte;
1474 if (s < HOST_BITS_PER_WIDE_INT)
1475 byte = (l1 >> s) & 0xff;
1476 else
1477 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1479 if (d < HOST_BITS_PER_WIDE_INT)
1480 lv |= byte << d;
1481 else
1482 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1485 break;
1487 case TRUNCATE:
1488 /* This is just a change-of-mode, so do nothing. */
1489 lv = l1, hv = h1;
1490 break;
1492 case ZERO_EXTEND:
1493 gcc_assert (op_mode != VOIDmode);
1495 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1496 return 0;
1498 hv = 0;
1499 lv = l1 & GET_MODE_MASK (op_mode);
1500 break;
1502 case SIGN_EXTEND:
1503 if (op_mode == VOIDmode
1504 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1505 return 0;
1506 else
1508 lv = l1 & GET_MODE_MASK (op_mode);
1509 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1510 && (lv & ((unsigned HOST_WIDE_INT) 1
1511 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1512 lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1514 hv = HWI_SIGN_EXTEND (lv);
1516 break;
1518 case SQRT:
1519 return 0;
1521 default:
1522 return 0;
1525 return immed_double_const (lv, hv, mode);
1528 else if (GET_CODE (op) == CONST_DOUBLE
1529 && SCALAR_FLOAT_MODE_P (mode))
1531 REAL_VALUE_TYPE d, t;
1532 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1534 switch (code)
1536 case SQRT:
1537 if (HONOR_SNANS (mode) && real_isnan (&d))
1538 return 0;
1539 real_sqrt (&t, mode, &d);
1540 d = t;
1541 break;
1542 case ABS:
1543 d = real_value_abs (&d);
1544 break;
1545 case NEG:
1546 d = real_value_negate (&d);
1547 break;
1548 case FLOAT_TRUNCATE:
1549 d = real_value_truncate (mode, d);
1550 break;
1551 case FLOAT_EXTEND:
1552 /* All this does is change the mode. */
1553 break;
1554 case FIX:
1555 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1556 break;
1557 case NOT:
1559 long tmp[4];
1560 int i;
1562 real_to_target (tmp, &d, GET_MODE (op));
1563 for (i = 0; i < 4; i++)
1564 tmp[i] = ~tmp[i];
1565 real_from_target (&d, tmp, mode);
1566 break;
1568 default:
1569 gcc_unreachable ();
1571 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1574 else if (GET_CODE (op) == CONST_DOUBLE
1575 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1576 && GET_MODE_CLASS (mode) == MODE_INT
1577 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1579 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1580 operators are intentionally left unspecified (to ease implementation
1581 by target backends), for consistency, this routine implements the
1582 same semantics for constant folding as used by the middle-end. */
1584 /* This was formerly used only for non-IEEE float.
1585 eggert@twinsun.com says it is safe for IEEE also. */
1586 HOST_WIDE_INT xh, xl, th, tl;
1587 REAL_VALUE_TYPE x, t;
1588 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1589 switch (code)
1591 case FIX:
1592 if (REAL_VALUE_ISNAN (x))
1593 return const0_rtx;
1595 /* Test against the signed upper bound. */
1596 if (width > HOST_BITS_PER_WIDE_INT)
1598 th = ((unsigned HOST_WIDE_INT) 1
1599 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1600 tl = -1;
1602 else
1604 th = 0;
1605 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1607 real_from_integer (&t, VOIDmode, tl, th, 0);
1608 if (REAL_VALUES_LESS (t, x))
1610 xh = th;
1611 xl = tl;
1612 break;
1615 /* Test against the signed lower bound. */
1616 if (width > HOST_BITS_PER_WIDE_INT)
1618 th = (unsigned HOST_WIDE_INT) (-1)
1619 << (width - HOST_BITS_PER_WIDE_INT - 1);
1620 tl = 0;
1622 else
1624 th = -1;
1625 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1627 real_from_integer (&t, VOIDmode, tl, th, 0);
1628 if (REAL_VALUES_LESS (x, t))
1630 xh = th;
1631 xl = tl;
1632 break;
1634 REAL_VALUE_TO_INT (&xl, &xh, x);
1635 break;
1637 case UNSIGNED_FIX:
1638 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1639 return const0_rtx;
1641 /* Test against the unsigned upper bound. */
1642 if (width == 2*HOST_BITS_PER_WIDE_INT)
1644 th = -1;
1645 tl = -1;
1647 else if (width >= HOST_BITS_PER_WIDE_INT)
1649 th = ((unsigned HOST_WIDE_INT) 1
1650 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1651 tl = -1;
1653 else
1655 th = 0;
1656 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1658 real_from_integer (&t, VOIDmode, tl, th, 1);
1659 if (REAL_VALUES_LESS (t, x))
1661 xh = th;
1662 xl = tl;
1663 break;
1666 REAL_VALUE_TO_INT (&xl, &xh, x);
1667 break;
1669 default:
1670 gcc_unreachable ();
1672 return immed_double_const (xl, xh, mode);
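/* Worked example (illustrative): folding (fix:SI X) where X is the DFmode
   constant 1e10 clamps against the signed upper bound, giving
   (const_int 2147483647); a NaN operand folds to const0_rtx, as does
   UNSIGNED_FIX of a negative value.  */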
1675 return NULL_RTX;
1678 /* Subroutine of simplify_binary_operation to simplify a commutative,
1679 associative binary operation CODE with result mode MODE, operating
1680 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1681 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1682 canonicalization is possible. */
1684 static rtx
1685 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1686 rtx op0, rtx op1)
1688 rtx tem;
1690 /* Linearize the operator to the left. */
1691 if (GET_CODE (op1) == code)
1693 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1694 if (GET_CODE (op0) == code)
1696 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1697 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1700 /* "a op (b op c)" becomes "(b op c) op a". */
1701 if (! swap_commutative_operands_p (op1, op0))
1702 return simplify_gen_binary (code, mode, op1, op0);
1704 tem = op0;
1705 op0 = op1;
1706 op1 = tem;
1709 if (GET_CODE (op0) == code)
1711 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1712 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1714 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1715 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1718 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1719 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1720 if (tem != 0)
1721 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1723 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1724 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1725 if (tem != 0)
1726 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1729 return 0;
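/* Worked example (illustrative): for AND in SImode,
   (and (and X (const_int 255)) (const_int 15)) hits the
   "(a op b) op c" -> "a op (b op c)" attempt: the two constants fold to
   (const_int 15), so the result is (and X (const_int 15)).  */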
1733 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1734 and OP1. Return 0 if no simplification is possible.
1736 Don't use this for relational operations such as EQ or LT.
1737 Use simplify_relational_operation instead. */
1739 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1740 rtx op0, rtx op1)
1742 rtx trueop0, trueop1;
1743 rtx tem;
1745 /* Relational operations don't work here. We must know the mode
1746 of the operands in order to do the comparison correctly.
1747 Assuming a full word can give incorrect results.
1748 Consider comparing 128 with -128 in QImode. */
1749 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1750 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1752 /* Make sure the constant is second. */
1753 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1754 && swap_commutative_operands_p (op0, op1))
1756 tem = op0, op0 = op1, op1 = tem;
1759 trueop0 = avoid_constant_pool_reference (op0);
1760 trueop1 = avoid_constant_pool_reference (op1);
1762 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1763 if (tem)
1764 return tem;
1765 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1768 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1769 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1770 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1771 actual constants. */
1773 static rtx
1774 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1775 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1777 rtx tem, reversed, opleft, opright;
1778 HOST_WIDE_INT val;
1779 unsigned int width = GET_MODE_BITSIZE (mode);
1781 /* Even if we can't compute a constant result,
1782 there are some cases worth simplifying. */
1784 switch (code)
1786 case PLUS:
1787 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1788 when x is NaN, infinite, or finite and nonzero. They aren't
1789 when x is -0 and the rounding mode is not towards -infinity,
1790 since (-0) + 0 is then 0. */
1791 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1792 return op0;
1794 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1795 transformations are safe even for IEEE. */
1796 if (GET_CODE (op0) == NEG)
1797 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1798 else if (GET_CODE (op1) == NEG)
1799 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1801 /* (~a) + 1 -> -a */
1802 if (INTEGRAL_MODE_P (mode)
1803 && GET_CODE (op0) == NOT
1804 && trueop1 == const1_rtx)
1805 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1807 /* Handle both-operands-constant cases. We can only add
1808 CONST_INTs to constants since the sum of relocatable symbols
1809 can't be handled by most assemblers. Don't add CONST_INT
1810 to CONST_INT since overflow won't be computed properly if wider
1811 than HOST_BITS_PER_WIDE_INT. */
1813 if ((GET_CODE (op0) == CONST
1814 || GET_CODE (op0) == SYMBOL_REF
1815 || GET_CODE (op0) == LABEL_REF)
1816 && CONST_INT_P (op1))
1817 return plus_constant (op0, INTVAL (op1));
1818 else if ((GET_CODE (op1) == CONST
1819 || GET_CODE (op1) == SYMBOL_REF
1820 || GET_CODE (op1) == LABEL_REF)
1821 && CONST_INT_P (op0))
1822 return plus_constant (op1, INTVAL (op0));
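/* Example (illustrative): (plus (symbol_ref "foo") (const_int 8)) folds via
   plus_constant to (const (plus (symbol_ref "foo") (const_int 8))), which an
   assembler can emit as a single relocation, whereas a sum of two
   relocatable symbols could not be folded this way.  */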
1824 /* See if this is something like X * C - X or vice versa or
1825 if the multiplication is written as a shift. If so, we can
1826 distribute and make a new multiply, shift, or maybe just
1827 have X (if C is 2 in the example above). But don't make
1828 something more expensive than we had before. */
1830 if (SCALAR_INT_MODE_P (mode))
1832 double_int coeff0, coeff1;
1833 rtx lhs = op0, rhs = op1;
1835 coeff0 = double_int_one;
1836 coeff1 = double_int_one;
1838 if (GET_CODE (lhs) == NEG)
1840 coeff0 = double_int_minus_one;
1841 lhs = XEXP (lhs, 0);
1843 else if (GET_CODE (lhs) == MULT
1844 && CONST_INT_P (XEXP (lhs, 1)))
1846 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1847 lhs = XEXP (lhs, 0);
1849 else if (GET_CODE (lhs) == ASHIFT
1850 && CONST_INT_P (XEXP (lhs, 1))
1851 && INTVAL (XEXP (lhs, 1)) >= 0
1852 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1854 coeff0 = double_int_setbit (double_int_zero,
1855 INTVAL (XEXP (lhs, 1)));
1856 lhs = XEXP (lhs, 0);
1859 if (GET_CODE (rhs) == NEG)
1861 coeff1 = double_int_minus_one;
1862 rhs = XEXP (rhs, 0);
1864 else if (GET_CODE (rhs) == MULT
1865 && CONST_INT_P (XEXP (rhs, 1)))
1867 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
1868 rhs = XEXP (rhs, 0);
1870 else if (GET_CODE (rhs) == ASHIFT
1871 && CONST_INT_P (XEXP (rhs, 1))
1872 && INTVAL (XEXP (rhs, 1)) >= 0
1873 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1875 coeff1 = double_int_setbit (double_int_zero,
1876 INTVAL (XEXP (rhs, 1)));
1877 rhs = XEXP (rhs, 0);
1880 if (rtx_equal_p (lhs, rhs))
1882 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1883 rtx coeff;
1884 double_int val;
1885 bool speed = optimize_function_for_speed_p (cfun);
1887 val = double_int_add (coeff0, coeff1);
1888 coeff = immed_double_int_const (val, mode);
1890 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1891 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1892 ? tem : 0;
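/* Worked example (illustrative): (plus (mult X (const_int 3)) X) gives
   coeff0 = 3 and coeff1 = 1, so the candidate is (mult X (const_int 4));
   (plus (ashift X (const_int 2)) X) likewise becomes (mult X (const_int 5)).
   The rewrite is kept only if rtx_cost says it is no more expensive than
   the original expression.  */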
1896 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1897 if ((CONST_INT_P (op1)
1898 || GET_CODE (op1) == CONST_DOUBLE)
1899 && GET_CODE (op0) == XOR
1900 && (CONST_INT_P (XEXP (op0, 1))
1901 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1902 && mode_signbit_p (mode, op1))
1903 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1904 simplify_gen_binary (XOR, mode, op1,
1905 XEXP (op0, 1)));
1907 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1908 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1909 && GET_CODE (op0) == MULT
1910 && GET_CODE (XEXP (op0, 0)) == NEG)
1912 rtx in1, in2;
1914 in1 = XEXP (XEXP (op0, 0), 0);
1915 in2 = XEXP (op0, 1);
1916 return simplify_gen_binary (MINUS, mode, op1,
1917 simplify_gen_binary (MULT, mode,
1918 in1, in2));
1921 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1922 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1923 is 1. */
1924 if (COMPARISON_P (op0)
1925 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1926 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1927 && (reversed = reversed_comparison (op0, mode)))
1928 return
1929 simplify_gen_unary (NEG, mode, reversed, mode);
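/* Worked example (illustrative): on a target with STORE_FLAG_VALUE == 1,
   (plus (eq A B) (const_int -1)) becomes (neg (ne A B)); both forms are 0
   when A == B and -1 when A != B.  */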
1931 /* If one of the operands is a PLUS or a MINUS, see if we can
1932 simplify this by the associative law.
1933 Don't use the associative law for floating point.
1934 The inaccuracy makes it nonassociative,
1935 and subtle programs can break if operations are associated. */
1937 if (INTEGRAL_MODE_P (mode)
1938 && (plus_minus_operand_p (op0)
1939 || plus_minus_operand_p (op1))
1940 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1941 return tem;
1943 /* Reassociate floating point addition only when the user
1944 specifies associative math operations. */
1945 if (FLOAT_MODE_P (mode)
1946 && flag_associative_math)
1948 tem = simplify_associative_operation (code, mode, op0, op1);
1949 if (tem)
1950 return tem;
1952 break;
1954 case COMPARE:
1955 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1956 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1957 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1958 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1960 rtx xop00 = XEXP (op0, 0);
1961 rtx xop10 = XEXP (op1, 0);
1963 #ifdef HAVE_cc0
1964 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1965 #else
1966 if (REG_P (xop00) && REG_P (xop10)
1967 && GET_MODE (xop00) == GET_MODE (xop10)
1968 && REGNO (xop00) == REGNO (xop10)
1969 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1970 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1971 #endif
1972 return xop00;
1974 break;
1976 case MINUS:
1977 /* We can't assume x-x is 0 even with non-IEEE floating point,
1978 but since it is zero except in very strange circumstances, we
1979 will treat it as zero with -ffinite-math-only. */
1980 if (rtx_equal_p (trueop0, trueop1)
1981 && ! side_effects_p (op0)
1982 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1983 return CONST0_RTX (mode);
1985 /* Change subtraction from zero into negation. (0 - x) is the
1986 same as -x when x is NaN, infinite, or finite and nonzero.
1987 But if the mode has signed zeros, and does not round towards
1988 -infinity, then 0 - 0 is 0, not -0. */
1989 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1990 return simplify_gen_unary (NEG, mode, op1, mode);
1992 /* (-1 - a) is ~a. */
1993 if (trueop0 == constm1_rtx)
1994 return simplify_gen_unary (NOT, mode, op1, mode);
1996 /* Subtracting 0 has no effect unless the mode has signed zeros
1997 and supports rounding towards -infinity. In such a case,
1998 0 - 0 is -0. */
1999 if (!(HONOR_SIGNED_ZEROS (mode)
2000 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2001 && trueop1 == CONST0_RTX (mode))
2002 return op0;
2004 /* See if this is something like X * C - X or vice versa or
2005 if the multiplication is written as a shift. If so, we can
2006 distribute and make a new multiply, shift, or maybe just
2007 have X (if C is 2 in the example above). But don't make
2008 something more expensive than we had before. */
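/* For example, (minus (mult X 3) X) becomes (mult X 2) and (minus (ashift X 2) X) becomes (mult X 3), provided the rtx_cost check below says the new form is no more expensive.  */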
2010 if (SCALAR_INT_MODE_P (mode))
2012 double_int coeff0, negcoeff1;
2013 rtx lhs = op0, rhs = op1;
2015 coeff0 = double_int_one;
2016 negcoeff1 = double_int_minus_one;
2018 if (GET_CODE (lhs) == NEG)
2020 coeff0 = double_int_minus_one;
2021 lhs = XEXP (lhs, 0);
2023 else if (GET_CODE (lhs) == MULT
2024 && CONST_INT_P (XEXP (lhs, 1)))
2026 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2027 lhs = XEXP (lhs, 0);
2029 else if (GET_CODE (lhs) == ASHIFT
2030 && CONST_INT_P (XEXP (lhs, 1))
2031 && INTVAL (XEXP (lhs, 1)) >= 0
2032 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2034 coeff0 = double_int_setbit (double_int_zero,
2035 INTVAL (XEXP (lhs, 1)));
2036 lhs = XEXP (lhs, 0);
2039 if (GET_CODE (rhs) == NEG)
2041 negcoeff1 = double_int_one;
2042 rhs = XEXP (rhs, 0);
2044 else if (GET_CODE (rhs) == MULT
2045 && CONST_INT_P (XEXP (rhs, 1)))
2047 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2048 rhs = XEXP (rhs, 0);
2050 else if (GET_CODE (rhs) == ASHIFT
2051 && CONST_INT_P (XEXP (rhs, 1))
2052 && INTVAL (XEXP (rhs, 1)) >= 0
2053 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2055 negcoeff1 = double_int_setbit (double_int_zero,
2056 INTVAL (XEXP (rhs, 1)));
2057 negcoeff1 = double_int_neg (negcoeff1);
2058 rhs = XEXP (rhs, 0);
2061 if (rtx_equal_p (lhs, rhs))
2063 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2064 rtx coeff;
2065 double_int val;
2066 bool speed = optimize_function_for_speed_p (cfun);
2068 val = double_int_add (coeff0, negcoeff1);
2069 coeff = immed_double_int_const (val, mode);
2071 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2072 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2073 ? tem : 0;
2077 /* (a - (-b)) -> (a + b). True even for IEEE. */
2078 if (GET_CODE (op1) == NEG)
2079 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2081 /* (-x - c) may be simplified as (-c - x). */
2082 if (GET_CODE (op0) == NEG
2083 && (CONST_INT_P (op1)
2084 || GET_CODE (op1) == CONST_DOUBLE))
2086 tem = simplify_unary_operation (NEG, mode, op1, mode);
2087 if (tem)
2088 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2091 /* Don't let a relocatable value get a negative coeff. */
2092 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2093 return simplify_gen_binary (PLUS, mode,
2094 op0,
2095 neg_const_int (mode, op1));
2097 /* (x - (x & y)) -> (x & ~y) */
2098 if (GET_CODE (op1) == AND)
2100 if (rtx_equal_p (op0, XEXP (op1, 0)))
2102 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2103 GET_MODE (XEXP (op1, 1)));
2104 return simplify_gen_binary (AND, mode, op0, tem);
2106 if (rtx_equal_p (op0, XEXP (op1, 1)))
2108 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2109 GET_MODE (XEXP (op1, 0)));
2110 return simplify_gen_binary (AND, mode, op0, tem);
2114 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2115 by reversing the comparison code if valid. */
2116 if (STORE_FLAG_VALUE == 1
2117 && trueop0 == const1_rtx
2118 && COMPARISON_P (op1)
2119 && (reversed = reversed_comparison (op1, mode)))
2120 return reversed;
2122 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2123 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2124 && GET_CODE (op1) == MULT
2125 && GET_CODE (XEXP (op1, 0)) == NEG)
2127 rtx in1, in2;
2129 in1 = XEXP (XEXP (op1, 0), 0);
2130 in2 = XEXP (op1, 1);
2131 return simplify_gen_binary (PLUS, mode,
2132 simplify_gen_binary (MULT, mode,
2133 in1, in2),
2134 op0);
2137 /* Canonicalize (minus (neg A) (mult B C)) to
2138 (minus (mult (neg B) C) A). */
2139 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2140 && GET_CODE (op1) == MULT
2141 && GET_CODE (op0) == NEG)
2143 rtx in1, in2;
2145 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2146 in2 = XEXP (op1, 1);
2147 return simplify_gen_binary (MINUS, mode,
2148 simplify_gen_binary (MULT, mode,
2149 in1, in2),
2150 XEXP (op0, 0));
2153 /* If one of the operands is a PLUS or a MINUS, see if we can
2154 simplify this by the associative law. This will, for example,
2155 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2156 Don't use the associative law for floating point.
2157 The inaccuracy makes it nonassociative,
2158 and subtle programs can break if operations are associated. */
2160 if (INTEGRAL_MODE_P (mode)
2161 && (plus_minus_operand_p (op0)
2162 || plus_minus_operand_p (op1))
2163 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2164 return tem;
2165 break;
2167 case MULT:
2168 if (trueop1 == constm1_rtx)
2169 return simplify_gen_unary (NEG, mode, op0, mode);
2171 if (GET_CODE (op0) == NEG)
2173 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2174 if (temp)
2175 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2177 if (GET_CODE (op1) == NEG)
2179 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2180 if (temp)
2181 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2184 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2185 x is NaN, since x * 0 is then also NaN. Nor is it valid
2186 when the mode has signed zeros, since multiplying a negative
2187 number by 0 will give -0, not 0. */
2188 if (!HONOR_NANS (mode)
2189 && !HONOR_SIGNED_ZEROS (mode)
2190 && trueop1 == CONST0_RTX (mode)
2191 && ! side_effects_p (op0))
2192 return op1;
2194 /* In IEEE floating point, x*1 is not equivalent to x for
2195 signalling NaNs. */
2196 if (!HONOR_SNANS (mode)
2197 && trueop1 == CONST1_RTX (mode))
2198 return op0;
2200 /* Convert multiply by constant power of two into shift unless
2201 we are still generating RTL. This test is a kludge. */
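/* For example, (mult X 8) becomes (ashift X 3).  */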
2202 if (CONST_INT_P (trueop1)
2203 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2204 /* If the mode is larger than the host word size, and the
2205 uppermost bit is set, then this isn't a power of two due
2206 to implicit sign extension. */
2207 && (width <= HOST_BITS_PER_WIDE_INT
2208 || val != HOST_BITS_PER_WIDE_INT - 1))
2209 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2211 /* Likewise for multipliers wider than a word. */
2212 if (GET_CODE (trueop1) == CONST_DOUBLE
2213 && (GET_MODE (trueop1) == VOIDmode
2214 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2215 && GET_MODE (op0) == mode
2216 && CONST_DOUBLE_LOW (trueop1) == 0
2217 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2218 return simplify_gen_binary (ASHIFT, mode, op0,
2219 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2221 /* x*2 is x+x and x*(-1) is -x */
2222 if (GET_CODE (trueop1) == CONST_DOUBLE
2223 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2224 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2225 && GET_MODE (op0) == mode)
2227 REAL_VALUE_TYPE d;
2228 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2230 if (REAL_VALUES_EQUAL (d, dconst2))
2231 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2233 if (!HONOR_SNANS (mode)
2234 && REAL_VALUES_EQUAL (d, dconstm1))
2235 return simplify_gen_unary (NEG, mode, op0, mode);
2238 /* Optimize -x * -x as x * x. */
2239 if (FLOAT_MODE_P (mode)
2240 && GET_CODE (op0) == NEG
2241 && GET_CODE (op1) == NEG
2242 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2243 && !side_effects_p (XEXP (op0, 0)))
2244 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2246 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2247 if (SCALAR_FLOAT_MODE_P (mode)
2248 && GET_CODE (op0) == ABS
2249 && GET_CODE (op1) == ABS
2250 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2251 && !side_effects_p (XEXP (op0, 0)))
2252 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2254 /* Reassociate multiplication, but for floating point MULTs
2255 only when the user specifies unsafe math optimizations. */
2256 if (! FLOAT_MODE_P (mode)
2257 || flag_unsafe_math_optimizations)
2259 tem = simplify_associative_operation (code, mode, op0, op1);
2260 if (tem)
2261 return tem;
2263 break;
2265 case IOR:
2266 if (trueop1 == CONST0_RTX (mode))
2267 return op0;
2268 if (CONST_INT_P (trueop1)
2269 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2270 == GET_MODE_MASK (mode)))
2271 return op1;
2272 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2273 return op0;
2274 /* A | (~A) -> -1 */
2275 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2276 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2277 && ! side_effects_p (op0)
2278 && SCALAR_INT_MODE_P (mode))
2279 return constm1_rtx;
2281 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2282 if (CONST_INT_P (op1)
2283 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2284 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2285 return op1;
2287 /* Canonicalize (X & C1) | C2. */
2288 if (GET_CODE (op0) == AND
2289 && CONST_INT_P (trueop1)
2290 && CONST_INT_P (XEXP (op0, 1)))
2292 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2293 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2294 HOST_WIDE_INT c2 = INTVAL (trueop1);
2296 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2297 if ((c1 & c2) == c1
2298 && !side_effects_p (XEXP (op0, 0)))
2299 return trueop1;
2301 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2302 if (((c1|c2) & mask) == mask)
2303 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2305 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
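/* For example, (ior (and X 0xff) 0xf0) becomes (ior (and X 0x0f) 0xf0).  */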
2306 if (((c1 & ~c2) & mask) != (c1 & mask))
2308 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2309 gen_int_mode (c1 & ~c2, mode));
2310 return simplify_gen_binary (IOR, mode, tem, op1);
2314 /* Convert (A & B) | A to A. */
2315 if (GET_CODE (op0) == AND
2316 && (rtx_equal_p (XEXP (op0, 0), op1)
2317 || rtx_equal_p (XEXP (op0, 1), op1))
2318 && ! side_effects_p (XEXP (op0, 0))
2319 && ! side_effects_p (XEXP (op0, 1)))
2320 return op1;
2322 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2323 mode size to (rotate A CX). */
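/* For example, in SImode (ior (ashift A 24) (lshiftrt A 8)) becomes (rotate A 24), since 24 + 8 equals the mode size.  */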
2325 if (GET_CODE (op1) == ASHIFT
2326 || GET_CODE (op1) == SUBREG)
2328 opleft = op1;
2329 opright = op0;
2331 else
2333 opright = op1;
2334 opleft = op0;
2337 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2338 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2339 && CONST_INT_P (XEXP (opleft, 1))
2340 && CONST_INT_P (XEXP (opright, 1))
2341 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2342 == GET_MODE_BITSIZE (mode)))
2343 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2345 /* Same, but for ashift that has been "simplified" to a wider mode
2346 by simplify_shift_const. */
2348 if (GET_CODE (opleft) == SUBREG
2349 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2350 && GET_CODE (opright) == LSHIFTRT
2351 && GET_CODE (XEXP (opright, 0)) == SUBREG
2352 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2353 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2354 && (GET_MODE_SIZE (GET_MODE (opleft))
2355 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2356 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2357 SUBREG_REG (XEXP (opright, 0)))
2358 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2359 && CONST_INT_P (XEXP (opright, 1))
2360 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2361 == GET_MODE_BITSIZE (mode)))
2362 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2363 XEXP (SUBREG_REG (opleft), 1));
2365 /* If we have (ior (and X C1) C2), simplify this by making
2366 C1 as small as possible if C1 actually changes. */
2367 if (CONST_INT_P (op1)
2368 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2369 || INTVAL (op1) > 0)
2370 && GET_CODE (op0) == AND
2371 && CONST_INT_P (XEXP (op0, 1))
2372 && CONST_INT_P (op1)
2373 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2374 return simplify_gen_binary (IOR, mode,
2375 simplify_gen_binary
2376 (AND, mode, XEXP (op0, 0),
2377 GEN_INT (UINTVAL (XEXP (op0, 1))
2378 & ~UINTVAL (op1))),
2379 op1);
2381 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2382 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2383 the PLUS does not affect any of the bits in OP1: then we can do
2384 the IOR as a PLUS and we can associate. This is valid if OP1
2385 can be safely shifted left C bits. */
2386 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2387 && GET_CODE (XEXP (op0, 0)) == PLUS
2388 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2389 && CONST_INT_P (XEXP (op0, 1))
2390 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2392 int count = INTVAL (XEXP (op0, 1));
2393 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2395 if (mask >> count == INTVAL (trueop1)
2396 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2397 return simplify_gen_binary (ASHIFTRT, mode,
2398 plus_constant (XEXP (op0, 0), mask),
2399 XEXP (op0, 1));
2402 tem = simplify_associative_operation (code, mode, op0, op1);
2403 if (tem)
2404 return tem;
2405 break;
2407 case XOR:
2408 if (trueop1 == CONST0_RTX (mode))
2409 return op0;
2410 if (CONST_INT_P (trueop1)
2411 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2412 == GET_MODE_MASK (mode)))
2413 return simplify_gen_unary (NOT, mode, op0, mode);
2414 if (rtx_equal_p (trueop0, trueop1)
2415 && ! side_effects_p (op0)
2416 && GET_MODE_CLASS (mode) != MODE_CC)
2417 return CONST0_RTX (mode);
2419 /* Canonicalize XOR of the most significant bit to PLUS. */
2420 if ((CONST_INT_P (op1)
2421 || GET_CODE (op1) == CONST_DOUBLE)
2422 && mode_signbit_p (mode, op1))
2423 return simplify_gen_binary (PLUS, mode, op0, op1);
2424 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2425 if ((CONST_INT_P (op1)
2426 || GET_CODE (op1) == CONST_DOUBLE)
2427 && GET_CODE (op0) == PLUS
2428 && (CONST_INT_P (XEXP (op0, 1))
2429 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2430 && mode_signbit_p (mode, XEXP (op0, 1)))
2431 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2432 simplify_gen_binary (XOR, mode, op1,
2433 XEXP (op0, 1)));
2435 /* If we are XORing two things that have no bits in common,
2436 convert them into an IOR. This helps to detect rotation encoded
2437 using those methods and possibly other simplifications. */
2439 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2440 && (nonzero_bits (op0, mode)
2441 & nonzero_bits (op1, mode)) == 0)
2442 return (simplify_gen_binary (IOR, mode, op0, op1));
2444 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2445 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2446 (NOT y). */
2448 int num_negated = 0;
2450 if (GET_CODE (op0) == NOT)
2451 num_negated++, op0 = XEXP (op0, 0);
2452 if (GET_CODE (op1) == NOT)
2453 num_negated++, op1 = XEXP (op1, 0);
2455 if (num_negated == 2)
2456 return simplify_gen_binary (XOR, mode, op0, op1);
2457 else if (num_negated == 1)
2458 return simplify_gen_unary (NOT, mode,
2459 simplify_gen_binary (XOR, mode, op0, op1),
2460 mode);
2463 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2464 correspond to a machine insn or result in further simplifications
2465 if B is a constant. */
2467 if (GET_CODE (op0) == AND
2468 && rtx_equal_p (XEXP (op0, 1), op1)
2469 && ! side_effects_p (op1))
2470 return simplify_gen_binary (AND, mode,
2471 simplify_gen_unary (NOT, mode,
2472 XEXP (op0, 0), mode),
2473 op1);
2475 else if (GET_CODE (op0) == AND
2476 && rtx_equal_p (XEXP (op0, 0), op1)
2477 && ! side_effects_p (op1))
2478 return simplify_gen_binary (AND, mode,
2479 simplify_gen_unary (NOT, mode,
2480 XEXP (op0, 1), mode),
2481 op1);
2483 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2484 we can transform like this:
2485 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2486 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2487 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2488 Attempt a few simplifications when B and C are both constants. */
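/* For example, with B == 0x0f and C == 0xff we have ~C & B == 0, so the first branch below tries to rewrite (xor (and A 0x0f) 0xff) as (ior (and (not A) 0xff) 0xf0), provided the inner AND simplifies.  */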
2489 if (GET_CODE (op0) == AND
2490 && CONST_INT_P (op1)
2491 && CONST_INT_P (XEXP (op0, 1)))
2493 rtx a = XEXP (op0, 0);
2494 rtx b = XEXP (op0, 1);
2495 rtx c = op1;
2496 HOST_WIDE_INT bval = INTVAL (b);
2497 HOST_WIDE_INT cval = INTVAL (c);
2499 rtx na_c
2500 = simplify_binary_operation (AND, mode,
2501 simplify_gen_unary (NOT, mode, a, mode),
2502 c);
2503 if ((~cval & bval) == 0)
2505 /* Try to simplify ~A&C | ~B&C. */
2506 if (na_c != NULL_RTX)
2507 return simplify_gen_binary (IOR, mode, na_c,
2508 GEN_INT (~bval & cval));
2510 else
2512 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2513 if (na_c == const0_rtx)
2515 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2516 GEN_INT (~cval & bval));
2517 return simplify_gen_binary (IOR, mode, a_nc_b,
2518 GEN_INT (~bval & cval));
2523 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2524 comparison if STORE_FLAG_VALUE is 1. */
2525 if (STORE_FLAG_VALUE == 1
2526 && trueop1 == const1_rtx
2527 && COMPARISON_P (op0)
2528 && (reversed = reversed_comparison (op0, mode)))
2529 return reversed;
2531 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2532 is (lt foo (const_int 0)), so we can perform the above
2533 simplification if STORE_FLAG_VALUE is 1. */
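/* For example, in SImode with STORE_FLAG_VALUE == 1, (xor (lshiftrt X 31) 1) becomes (ge X 0).  */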
2535 if (STORE_FLAG_VALUE == 1
2536 && trueop1 == const1_rtx
2537 && GET_CODE (op0) == LSHIFTRT
2538 && CONST_INT_P (XEXP (op0, 1))
2539 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2540 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2542 /* (xor (comparison foo bar) (const_int sign-bit))
2543 when STORE_FLAG_VALUE is the sign bit. */
2544 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2545 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2546 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2547 && trueop1 == const_true_rtx
2548 && COMPARISON_P (op0)
2549 && (reversed = reversed_comparison (op0, mode)))
2550 return reversed;
2552 tem = simplify_associative_operation (code, mode, op0, op1);
2553 if (tem)
2554 return tem;
2555 break;
2557 case AND:
2558 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2559 return trueop1;
2560 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2562 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2563 HOST_WIDE_INT nzop1;
2564 if (CONST_INT_P (trueop1))
2566 HOST_WIDE_INT val1 = INTVAL (trueop1);
2567 /* If we are turning off bits already known off in OP0, we need
2568 not do an AND. */
2569 if ((nzop0 & ~val1) == 0)
2570 return op0;
2572 nzop1 = nonzero_bits (trueop1, mode);
2573 /* If we are clearing all the nonzero bits, the result is zero. */
2574 if ((nzop1 & nzop0) == 0
2575 && !side_effects_p (op0) && !side_effects_p (op1))
2576 return CONST0_RTX (mode);
2578 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2579 && GET_MODE_CLASS (mode) != MODE_CC)
2580 return op0;
2581 /* A & (~A) -> 0 */
2582 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2583 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2584 && ! side_effects_p (op0)
2585 && GET_MODE_CLASS (mode) != MODE_CC)
2586 return CONST0_RTX (mode);
2588 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2589 there are no nonzero bits of C outside of X's mode. */
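/* For example, if X has QImode, (and (sign_extend:SI X) 0x7f) becomes (zero_extend:SI (and:QI X 0x7f)).  */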
2590 if ((GET_CODE (op0) == SIGN_EXTEND
2591 || GET_CODE (op0) == ZERO_EXTEND)
2592 && CONST_INT_P (trueop1)
2593 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2594 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2595 & UINTVAL (trueop1)) == 0)
2597 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2598 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2599 gen_int_mode (INTVAL (trueop1),
2600 imode));
2601 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2604 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2605 we might be able to further simplify the AND with X and potentially
2606 remove the truncation altogether. */
2607 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2609 rtx x = XEXP (op0, 0);
2610 enum machine_mode xmode = GET_MODE (x);
2611 tem = simplify_gen_binary (AND, xmode, x,
2612 gen_int_mode (INTVAL (trueop1), xmode));
2613 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2616 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2617 if (GET_CODE (op0) == IOR
2618 && CONST_INT_P (trueop1)
2619 && CONST_INT_P (XEXP (op0, 1)))
2621 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2622 return simplify_gen_binary (IOR, mode,
2623 simplify_gen_binary (AND, mode,
2624 XEXP (op0, 0), op1),
2625 gen_int_mode (tmp, mode));
2628 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2629 insn (and may simplify more). */
2630 if (GET_CODE (op0) == XOR
2631 && rtx_equal_p (XEXP (op0, 0), op1)
2632 && ! side_effects_p (op1))
2633 return simplify_gen_binary (AND, mode,
2634 simplify_gen_unary (NOT, mode,
2635 XEXP (op0, 1), mode),
2636 op1);
2638 if (GET_CODE (op0) == XOR
2639 && rtx_equal_p (XEXP (op0, 1), op1)
2640 && ! side_effects_p (op1))
2641 return simplify_gen_binary (AND, mode,
2642 simplify_gen_unary (NOT, mode,
2643 XEXP (op0, 0), mode),
2644 op1);
2646 /* Similarly for (~(A ^ B)) & A. */
2647 if (GET_CODE (op0) == NOT
2648 && GET_CODE (XEXP (op0, 0)) == XOR
2649 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2650 && ! side_effects_p (op1))
2651 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2653 if (GET_CODE (op0) == NOT
2654 && GET_CODE (XEXP (op0, 0)) == XOR
2655 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2656 && ! side_effects_p (op1))
2657 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2659 /* Convert (A | B) & A to A. */
2660 if (GET_CODE (op0) == IOR
2661 && (rtx_equal_p (XEXP (op0, 0), op1)
2662 || rtx_equal_p (XEXP (op0, 1), op1))
2663 && ! side_effects_p (XEXP (op0, 0))
2664 && ! side_effects_p (XEXP (op0, 1)))
2665 return op1;
2667 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2668 ((A & N) + B) & M -> (A + B) & M
2669 Similarly if (N & M) == 0,
2670 ((A | N) + B) & M -> (A + B) & M
2671 and for - instead of + and/or ^ instead of |.
2672 Also, if (N & M) == 0, then
2673 (A +- N) & M -> A & M. */
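/* For example, with M == 0xff, ((A & 0x1ff) + B) & 0xff becomes (A + B) & 0xff, and (A + 0x100) & 0xff becomes A & 0xff.  */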
2674 if (CONST_INT_P (trueop1)
2675 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2676 && ~UINTVAL (trueop1)
2677 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2678 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2680 rtx pmop[2];
2681 int which;
2683 pmop[0] = XEXP (op0, 0);
2684 pmop[1] = XEXP (op0, 1);
2686 if (CONST_INT_P (pmop[1])
2687 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2688 return simplify_gen_binary (AND, mode, pmop[0], op1);
2690 for (which = 0; which < 2; which++)
2692 tem = pmop[which];
2693 switch (GET_CODE (tem))
2695 case AND:
2696 if (CONST_INT_P (XEXP (tem, 1))
2697 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2698 == UINTVAL (trueop1))
2699 pmop[which] = XEXP (tem, 0);
2700 break;
2701 case IOR:
2702 case XOR:
2703 if (CONST_INT_P (XEXP (tem, 1))
2704 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2705 pmop[which] = XEXP (tem, 0);
2706 break;
2707 default:
2708 break;
2712 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2714 tem = simplify_gen_binary (GET_CODE (op0), mode,
2715 pmop[0], pmop[1]);
2716 return simplify_gen_binary (code, mode, tem, op1);
2720 /* (and X (ior (not X) Y)) -> (and X Y) */
2721 if (GET_CODE (op1) == IOR
2722 && GET_CODE (XEXP (op1, 0)) == NOT
2723 && op0 == XEXP (XEXP (op1, 0), 0))
2724 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2726 /* (and (ior (not X) Y) X) -> (and X Y) */
2727 if (GET_CODE (op0) == IOR
2728 && GET_CODE (XEXP (op0, 0)) == NOT
2729 && op1 == XEXP (XEXP (op0, 0), 0))
2730 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2732 tem = simplify_associative_operation (code, mode, op0, op1);
2733 if (tem)
2734 return tem;
2735 break;
2737 case UDIV:
2738 /* 0/x is 0 (or x&0 if x has side-effects). */
2739 if (trueop0 == CONST0_RTX (mode))
2741 if (side_effects_p (op1))
2742 return simplify_gen_binary (AND, mode, op1, trueop0);
2743 return trueop0;
2745 /* x/1 is x. */
2746 if (trueop1 == CONST1_RTX (mode))
2747 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2748 /* Convert divide by power of two into shift. */
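/* For example, (udiv X 16) becomes (lshiftrt X 4).  */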
2749 if (CONST_INT_P (trueop1)
2750 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2751 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2752 break;
2754 case DIV:
2755 /* Handle floating point and integers separately. */
2756 if (SCALAR_FLOAT_MODE_P (mode))
2758 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2759 safe for modes with NaNs, since 0.0 / 0.0 will then be
2760 NaN rather than 0.0. Nor is it safe for modes with signed
2761 zeros, since dividing 0 by a negative number gives -0.0 */
2762 if (trueop0 == CONST0_RTX (mode)
2763 && !HONOR_NANS (mode)
2764 && !HONOR_SIGNED_ZEROS (mode)
2765 && ! side_effects_p (op1))
2766 return op0;
2767 /* x/1.0 is x. */
2768 if (trueop1 == CONST1_RTX (mode)
2769 && !HONOR_SNANS (mode))
2770 return op0;
2772 if (GET_CODE (trueop1) == CONST_DOUBLE
2773 && trueop1 != CONST0_RTX (mode))
2775 REAL_VALUE_TYPE d;
2776 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2778 /* x/-1.0 is -x. */
2779 if (REAL_VALUES_EQUAL (d, dconstm1)
2780 && !HONOR_SNANS (mode))
2781 return simplify_gen_unary (NEG, mode, op0, mode);
2783 /* Change FP division by a constant into multiplication.
2784 Only do this with -freciprocal-math. */
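/* For example, x / 4.0 becomes x * 0.25.  */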
2785 if (flag_reciprocal_math
2786 && !REAL_VALUES_EQUAL (d, dconst0))
2788 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2789 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2790 return simplify_gen_binary (MULT, mode, op0, tem);
2794 else
2796 /* 0/x is 0 (or x&0 if x has side-effects). */
2797 if (trueop0 == CONST0_RTX (mode)
2798 && !cfun->can_throw_non_call_exceptions)
2800 if (side_effects_p (op1))
2801 return simplify_gen_binary (AND, mode, op1, trueop0);
2802 return trueop0;
2804 /* x/1 is x. */
2805 if (trueop1 == CONST1_RTX (mode))
2806 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2807 /* x/-1 is -x. */
2808 if (trueop1 == constm1_rtx)
2810 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2811 return simplify_gen_unary (NEG, mode, x, mode);
2814 break;
2816 case UMOD:
2817 /* 0%x is 0 (or x&0 if x has side-effects). */
2818 if (trueop0 == CONST0_RTX (mode))
2820 if (side_effects_p (op1))
2821 return simplify_gen_binary (AND, mode, op1, trueop0);
2822 return trueop0;
2824 /* x%1 is 0 (or x&0 if x has side-effects). */
2825 if (trueop1 == CONST1_RTX (mode))
2827 if (side_effects_p (op0))
2828 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2829 return CONST0_RTX (mode);
2831 /* Implement modulus by power of two as AND. */
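/* For example, (umod X 8) becomes (and X 7).  */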
2832 if (CONST_INT_P (trueop1)
2833 && exact_log2 (UINTVAL (trueop1)) > 0)
2834 return simplify_gen_binary (AND, mode, op0,
2835 GEN_INT (INTVAL (op1) - 1));
2836 break;
2838 case MOD:
2839 /* 0%x is 0 (or x&0 if x has side-effects). */
2840 if (trueop0 == CONST0_RTX (mode))
2842 if (side_effects_p (op1))
2843 return simplify_gen_binary (AND, mode, op1, trueop0);
2844 return trueop0;
2846 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2847 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2849 if (side_effects_p (op0))
2850 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2851 return CONST0_RTX (mode);
2853 break;
2855 case ROTATERT:
2856 case ROTATE:
2857 case ASHIFTRT:
2858 if (trueop1 == CONST0_RTX (mode))
2859 return op0;
2860 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2861 return op0;
2862 /* Rotating ~0 always results in ~0. */
2863 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2864 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
2865 && ! side_effects_p (op1))
2866 return op0;
2867 canonicalize_shift:
2868 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2870 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2871 if (val != INTVAL (op1))
2872 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2874 break;
2876 case ASHIFT:
2877 case SS_ASHIFT:
2878 case US_ASHIFT:
2879 if (trueop1 == CONST0_RTX (mode))
2880 return op0;
2881 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2882 return op0;
2883 goto canonicalize_shift;
2885 case LSHIFTRT:
2886 if (trueop1 == CONST0_RTX (mode))
2887 return op0;
2888 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2889 return op0;
2890 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2891 if (GET_CODE (op0) == CLZ
2892 && CONST_INT_P (trueop1)
2893 && STORE_FLAG_VALUE == 1
2894 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2896 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2897 unsigned HOST_WIDE_INT zero_val = 0;
2899 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2900 && zero_val == GET_MODE_BITSIZE (imode)
2901 && INTVAL (trueop1) == exact_log2 (zero_val))
2902 return simplify_gen_relational (EQ, mode, imode,
2903 XEXP (op0, 0), const0_rtx);
2905 goto canonicalize_shift;
2907 case SMIN:
2908 if (width <= HOST_BITS_PER_WIDE_INT
2909 && CONST_INT_P (trueop1)
2910 && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width - 1)
2911 && ! side_effects_p (op0))
2912 return op1;
2913 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2914 return op0;
2915 tem = simplify_associative_operation (code, mode, op0, op1);
2916 if (tem)
2917 return tem;
2918 break;
2920 case SMAX:
2921 if (width <= HOST_BITS_PER_WIDE_INT
2922 && CONST_INT_P (trueop1)
2923 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
2924 && ! side_effects_p (op0))
2925 return op1;
2926 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2927 return op0;
2928 tem = simplify_associative_operation (code, mode, op0, op1);
2929 if (tem)
2930 return tem;
2931 break;
2933 case UMIN:
2934 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2935 return op1;
2936 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2937 return op0;
2938 tem = simplify_associative_operation (code, mode, op0, op1);
2939 if (tem)
2940 return tem;
2941 break;
2943 case UMAX:
2944 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2945 return op1;
2946 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2947 return op0;
2948 tem = simplify_associative_operation (code, mode, op0, op1);
2949 if (tem)
2950 return tem;
2951 break;
2953 case SS_PLUS:
2954 case US_PLUS:
2955 case SS_MINUS:
2956 case US_MINUS:
2957 case SS_MULT:
2958 case US_MULT:
2959 case SS_DIV:
2960 case US_DIV:
2961 /* ??? There are simplifications that can be done. */
2962 return 0;
2964 case VEC_SELECT:
2965 if (!VECTOR_MODE_P (mode))
2967 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2968 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2969 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2970 gcc_assert (XVECLEN (trueop1, 0) == 1);
2971 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2973 if (GET_CODE (trueop0) == CONST_VECTOR)
2974 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2975 (trueop1, 0, 0)));
2977 /* Extract a scalar element from a nested VEC_SELECT expression
2978 (with optional nested VEC_CONCAT expression). Some targets
2979 (i386) extract scalar element from a vector using chain of
2980 nested VEC_SELECT expressions. When input operand is a memory
2981 operand, this operation can be simplified to a simple scalar
2982 load from an offset memory address. */
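/* In other words, the outer selector picks entry I of the inner PARALLEL, and that entry is the element index into the inner VEC_SELECT's operand (or, through VEC_CONCAT, into one of its two halves).  */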
2983 if (GET_CODE (trueop0) == VEC_SELECT)
2985 rtx op0 = XEXP (trueop0, 0);
2986 rtx op1 = XEXP (trueop0, 1);
2988 enum machine_mode opmode = GET_MODE (op0);
2989 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2990 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2992 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2993 int elem;
2995 rtvec vec;
2996 rtx tmp_op, tmp;
2998 gcc_assert (GET_CODE (op1) == PARALLEL);
2999 gcc_assert (i < n_elts);
3001 /* Select the element pointed to by the nested selector. */
3002 elem = INTVAL (XVECEXP (op1, 0, i));
3004 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3005 if (GET_CODE (op0) == VEC_CONCAT)
3007 rtx op00 = XEXP (op0, 0);
3008 rtx op01 = XEXP (op0, 1);
3010 enum machine_mode mode00, mode01;
3011 int n_elts00, n_elts01;
3013 mode00 = GET_MODE (op00);
3014 mode01 = GET_MODE (op01);
3016 /* Find out the number of elements of each operand. */
3017 if (VECTOR_MODE_P (mode00))
3019 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3020 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3022 else
3023 n_elts00 = 1;
3025 if (VECTOR_MODE_P (mode01))
3027 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3028 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3030 else
3031 n_elts01 = 1;
3033 gcc_assert (n_elts == n_elts00 + n_elts01);
3035 /* Select correct operand of VEC_CONCAT
3036 and adjust selector. */
3037 if (elem < n_elts01)
3038 tmp_op = op00;
3039 else
3041 tmp_op = op01;
3042 elem -= n_elts00;
3045 else
3046 tmp_op = op0;
3048 vec = rtvec_alloc (1);
3049 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3051 tmp = gen_rtx_fmt_ee (code, mode,
3052 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3053 return tmp;
3055 if (GET_CODE (trueop0) == VEC_DUPLICATE
3056 && GET_MODE (XEXP (trueop0, 0)) == mode)
3057 return XEXP (trueop0, 0);
3059 else
3061 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3062 gcc_assert (GET_MODE_INNER (mode)
3063 == GET_MODE_INNER (GET_MODE (trueop0)));
3064 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3066 if (GET_CODE (trueop0) == CONST_VECTOR)
3068 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3069 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3070 rtvec v = rtvec_alloc (n_elts);
3071 unsigned int i;
3073 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3074 for (i = 0; i < n_elts; i++)
3076 rtx x = XVECEXP (trueop1, 0, i);
3078 gcc_assert (CONST_INT_P (x));
3079 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3080 INTVAL (x));
3083 return gen_rtx_CONST_VECTOR (mode, v);
3087 if (XVECLEN (trueop1, 0) == 1
3088 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3089 && GET_CODE (trueop0) == VEC_CONCAT)
3091 rtx vec = trueop0;
3092 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3094 /* Try to find the element in the VEC_CONCAT. */
3095 while (GET_MODE (vec) != mode
3096 && GET_CODE (vec) == VEC_CONCAT)
3098 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3099 if (offset < vec_size)
3100 vec = XEXP (vec, 0);
3101 else
3103 offset -= vec_size;
3104 vec = XEXP (vec, 1);
3106 vec = avoid_constant_pool_reference (vec);
3109 if (GET_MODE (vec) == mode)
3110 return vec;
3113 return 0;
3114 case VEC_CONCAT:
3116 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3117 ? GET_MODE (trueop0)
3118 : GET_MODE_INNER (mode));
3119 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3120 ? GET_MODE (trueop1)
3121 : GET_MODE_INNER (mode));
3123 gcc_assert (VECTOR_MODE_P (mode));
3124 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3125 == GET_MODE_SIZE (mode));
3127 if (VECTOR_MODE_P (op0_mode))
3128 gcc_assert (GET_MODE_INNER (mode)
3129 == GET_MODE_INNER (op0_mode));
3130 else
3131 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3133 if (VECTOR_MODE_P (op1_mode))
3134 gcc_assert (GET_MODE_INNER (mode)
3135 == GET_MODE_INNER (op1_mode));
3136 else
3137 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3139 if ((GET_CODE (trueop0) == CONST_VECTOR
3140 || CONST_INT_P (trueop0)
3141 || GET_CODE (trueop0) == CONST_DOUBLE)
3142 && (GET_CODE (trueop1) == CONST_VECTOR
3143 || CONST_INT_P (trueop1)
3144 || GET_CODE (trueop1) == CONST_DOUBLE))
3146 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3147 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3148 rtvec v = rtvec_alloc (n_elts);
3149 unsigned int i;
3150 unsigned in_n_elts = 1;
3152 if (VECTOR_MODE_P (op0_mode))
3153 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3154 for (i = 0; i < n_elts; i++)
3156 if (i < in_n_elts)
3158 if (!VECTOR_MODE_P (op0_mode))
3159 RTVEC_ELT (v, i) = trueop0;
3160 else
3161 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3163 else
3165 if (!VECTOR_MODE_P (op1_mode))
3166 RTVEC_ELT (v, i) = trueop1;
3167 else
3168 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3169 i - in_n_elts);
3173 return gen_rtx_CONST_VECTOR (mode, v);
3176 return 0;
3178 default:
3179 gcc_unreachable ();
3182 return 0;
3186 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3187 rtx op0, rtx op1)
3189 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3190 HOST_WIDE_INT val;
3191 unsigned int width = GET_MODE_BITSIZE (mode);
3193 if (VECTOR_MODE_P (mode)
3194 && code != VEC_CONCAT
3195 && GET_CODE (op0) == CONST_VECTOR
3196 && GET_CODE (op1) == CONST_VECTOR)
3198 unsigned n_elts = GET_MODE_NUNITS (mode);
3199 enum machine_mode op0mode = GET_MODE (op0);
3200 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3201 enum machine_mode op1mode = GET_MODE (op1);
3202 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3203 rtvec v = rtvec_alloc (n_elts);
3204 unsigned int i;
3206 gcc_assert (op0_n_elts == n_elts);
3207 gcc_assert (op1_n_elts == n_elts);
3208 for (i = 0; i < n_elts; i++)
3210 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3211 CONST_VECTOR_ELT (op0, i),
3212 CONST_VECTOR_ELT (op1, i));
3213 if (!x)
3214 return 0;
3215 RTVEC_ELT (v, i) = x;
3218 return gen_rtx_CONST_VECTOR (mode, v);
3221 if (VECTOR_MODE_P (mode)
3222 && code == VEC_CONCAT
3223 && (CONST_INT_P (op0)
3224 || GET_CODE (op0) == CONST_DOUBLE
3225 || GET_CODE (op0) == CONST_FIXED)
3226 && (CONST_INT_P (op1)
3227 || GET_CODE (op1) == CONST_DOUBLE
3228 || GET_CODE (op1) == CONST_FIXED))
3230 unsigned n_elts = GET_MODE_NUNITS (mode);
3231 rtvec v = rtvec_alloc (n_elts);
3233 gcc_assert (n_elts >= 2);
3234 if (n_elts == 2)
3236 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3237 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3239 RTVEC_ELT (v, 0) = op0;
3240 RTVEC_ELT (v, 1) = op1;
3242 else
3244 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3245 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3246 unsigned i;
3248 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3249 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3250 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3252 for (i = 0; i < op0_n_elts; ++i)
3253 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3254 for (i = 0; i < op1_n_elts; ++i)
3255 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3258 return gen_rtx_CONST_VECTOR (mode, v);
3261 if (SCALAR_FLOAT_MODE_P (mode)
3262 && GET_CODE (op0) == CONST_DOUBLE
3263 && GET_CODE (op1) == CONST_DOUBLE
3264 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3266 if (code == AND
3267 || code == IOR
3268 || code == XOR)
3270 long tmp0[4];
3271 long tmp1[4];
3272 REAL_VALUE_TYPE r;
3273 int i;
3275 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3276 GET_MODE (op0));
3277 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3278 GET_MODE (op1));
3279 for (i = 0; i < 4; i++)
3281 switch (code)
3283 case AND:
3284 tmp0[i] &= tmp1[i];
3285 break;
3286 case IOR:
3287 tmp0[i] |= tmp1[i];
3288 break;
3289 case XOR:
3290 tmp0[i] ^= tmp1[i];
3291 break;
3292 default:
3293 gcc_unreachable ();
3296 real_from_target (&r, tmp0, mode);
3297 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3299 else
3301 REAL_VALUE_TYPE f0, f1, value, result;
3302 bool inexact;
3304 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3305 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3306 real_convert (&f0, mode, &f0);
3307 real_convert (&f1, mode, &f1);
3309 if (HONOR_SNANS (mode)
3310 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3311 return 0;
3313 if (code == DIV
3314 && REAL_VALUES_EQUAL (f1, dconst0)
3315 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3316 return 0;
3318 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3319 && flag_trapping_math
3320 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3322 int s0 = REAL_VALUE_NEGATIVE (f0);
3323 int s1 = REAL_VALUE_NEGATIVE (f1);
3325 switch (code)
3327 case PLUS:
3328 /* Inf + -Inf = NaN plus exception. */
3329 if (s0 != s1)
3330 return 0;
3331 break;
3332 case MINUS:
3333 /* Inf - Inf = NaN plus exception. */
3334 if (s0 == s1)
3335 return 0;
3336 break;
3337 case DIV:
3338 /* Inf / Inf = NaN plus exception. */
3339 return 0;
3340 default:
3341 break;
3345 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3346 && flag_trapping_math
3347 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3348 || (REAL_VALUE_ISINF (f1)
3349 && REAL_VALUES_EQUAL (f0, dconst0))))
3350 /* Inf * 0 = NaN plus exception. */
3351 return 0;
3353 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3354 &f0, &f1);
3355 real_convert (&result, mode, &value);
3357 /* Don't constant fold this floating point operation if
3358 the result has overflowed and flag_trapping_math is set. */
3360 if (flag_trapping_math
3361 && MODE_HAS_INFINITIES (mode)
3362 && REAL_VALUE_ISINF (result)
3363 && !REAL_VALUE_ISINF (f0)
3364 && !REAL_VALUE_ISINF (f1))
3365 /* Overflow plus exception. */
3366 return 0;
3368 /* Don't constant fold this floating point operation if the
3369 result may depend upon the run-time rounding mode and
3370 flag_rounding_math is set, or if GCC's software emulation
3371 is unable to accurately represent the result. */
3373 if ((flag_rounding_math
3374 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3375 && (inexact || !real_identical (&result, &value)))
3376 return NULL_RTX;
3378 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3382 /* We can fold some multi-word operations. */
3383 if (GET_MODE_CLASS (mode) == MODE_INT
3384 && width == HOST_BITS_PER_DOUBLE_INT
3385 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3386 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3388 double_int o0, o1, res, tmp;
3390 o0 = rtx_to_double_int (op0);
3391 o1 = rtx_to_double_int (op1);
3393 switch (code)
3395 case MINUS:
3396 /* A - B == A + (-B). */
3397 o1 = double_int_neg (o1);
3399 /* Fall through.... */
3401 case PLUS:
3402 res = double_int_add (o0, o1);
3403 break;
3405 case MULT:
3406 res = double_int_mul (o0, o1);
3407 break;
3409 case DIV:
3410 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3411 o0.low, o0.high, o1.low, o1.high,
3412 &res.low, &res.high,
3413 &tmp.low, &tmp.high))
3414 return 0;
3415 break;
3417 case MOD:
3418 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3419 o0.low, o0.high, o1.low, o1.high,
3420 &tmp.low, &tmp.high,
3421 &res.low, &res.high))
3422 return 0;
3423 break;
3425 case UDIV:
3426 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3427 o0.low, o0.high, o1.low, o1.high,
3428 &res.low, &res.high,
3429 &tmp.low, &tmp.high))
3430 return 0;
3431 break;
3433 case UMOD:
3434 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3435 o0.low, o0.high, o1.low, o1.high,
3436 &tmp.low, &tmp.high,
3437 &res.low, &res.high))
3438 return 0;
3439 break;
3441 case AND:
3442 res = double_int_and (o0, o1);
3443 break;
3445 case IOR:
3446 res = double_int_ior (o0, o1);
3447 break;
3449 case XOR:
3450 res = double_int_xor (o0, o1);
3451 break;
3453 case SMIN:
3454 res = double_int_smin (o0, o1);
3455 break;
3457 case SMAX:
3458 res = double_int_smax (o0, o1);
3459 break;
3461 case UMIN:
3462 res = double_int_umin (o0, o1);
3463 break;
3465 case UMAX:
3466 res = double_int_umax (o0, o1);
3467 break;
3469 case LSHIFTRT: case ASHIFTRT:
3470 case ASHIFT:
3471 case ROTATE: case ROTATERT:
3473 unsigned HOST_WIDE_INT cnt;
3475 if (SHIFT_COUNT_TRUNCATED)
3476 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3478 if (!double_int_fits_in_uhwi_p (o1)
3479 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3480 return 0;
3482 cnt = double_int_to_uhwi (o1);
3484 if (code == LSHIFTRT || code == ASHIFTRT)
3485 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3486 code == ASHIFTRT);
3487 else if (code == ASHIFT)
3488 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3489 true);
3490 else if (code == ROTATE)
3491 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3492 else /* code == ROTATERT */
3493 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3495 break;
3497 default:
3498 return 0;
3501 return immed_double_int_const (res, mode);
3504 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3505 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3507 /* Get the integer argument values in two forms:
3508 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3510 arg0 = INTVAL (op0);
3511 arg1 = INTVAL (op1);
3513 if (width < HOST_BITS_PER_WIDE_INT)
3515 arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3516 arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
3518 arg0s = arg0;
3519 if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3520 arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3522 arg1s = arg1;
3523 if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
3524 arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
3526 else
3528 arg0s = arg0;
3529 arg1s = arg1;
3532 /* Compute the value of the arithmetic. */
3534 switch (code)
3536 case PLUS:
3537 val = arg0s + arg1s;
3538 break;
3540 case MINUS:
3541 val = arg0s - arg1s;
3542 break;
3544 case MULT:
3545 val = arg0s * arg1s;
3546 break;
3548 case DIV:
3549 if (arg1s == 0
3550 || ((unsigned HOST_WIDE_INT) arg0s
3551 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3552 && arg1s == -1))
3553 return 0;
3554 val = arg0s / arg1s;
3555 break;
3557 case MOD:
3558 if (arg1s == 0
3559 || ((unsigned HOST_WIDE_INT) arg0s
3560 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3561 && arg1s == -1))
3562 return 0;
3563 val = arg0s % arg1s;
3564 break;
3566 case UDIV:
3567 if (arg1 == 0
3568 || ((unsigned HOST_WIDE_INT) arg0s
3569 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3570 && arg1s == -1))
3571 return 0;
3572 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3573 break;
3575 case UMOD:
3576 if (arg1 == 0
3577 || ((unsigned HOST_WIDE_INT) arg0s
3578 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3579 && arg1s == -1))
3580 return 0;
3581 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3582 break;
3584 case AND:
3585 val = arg0 & arg1;
3586 break;
3588 case IOR:
3589 val = arg0 | arg1;
3590 break;
3592 case XOR:
3593 val = arg0 ^ arg1;
3594 break;
3596 case LSHIFTRT:
3597 case ASHIFT:
3598 case ASHIFTRT:
3599 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3600 the value is in range. We can't return any old value for
3601 out-of-range arguments because either the middle-end (via
3602 shift_truncation_mask) or the back-end might be relying on
3603 target-specific knowledge. Nor can we rely on
3604 shift_truncation_mask, since the shift might not be part of an
3605 ashlM3, lshrM3 or ashrM3 instruction. */
3606 if (SHIFT_COUNT_TRUNCATED)
3607 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3608 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3609 return 0;
3611 val = (code == ASHIFT
3612 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3613 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3615 /* Sign-extend the result for arithmetic right shifts. */
3616 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3617 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3618 break;
3620 case ROTATERT:
3621 if (arg1 < 0)
3622 return 0;
3624 arg1 %= width;
3625 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3626 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3627 break;
3629 case ROTATE:
3630 if (arg1 < 0)
3631 return 0;
3633 arg1 %= width;
3634 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3635 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3636 break;
3638 case COMPARE:
3639 /* Do nothing here. */
3640 return 0;
3642 case SMIN:
3643 val = arg0s <= arg1s ? arg0s : arg1s;
3644 break;
3646 case UMIN:
3647 val = ((unsigned HOST_WIDE_INT) arg0
3648 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3649 break;
3651 case SMAX:
3652 val = arg0s > arg1s ? arg0s : arg1s;
3653 break;
3655 case UMAX:
3656 val = ((unsigned HOST_WIDE_INT) arg0
3657 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3658 break;
3660 case SS_PLUS:
3661 case US_PLUS:
3662 case SS_MINUS:
3663 case US_MINUS:
3664 case SS_MULT:
3665 case US_MULT:
3666 case SS_DIV:
3667 case US_DIV:
3668 case SS_ASHIFT:
3669 case US_ASHIFT:
3670 /* ??? There are simplifications that can be done. */
3671 return 0;
3673 default:
3674 gcc_unreachable ();
3677 return gen_int_mode (val, mode);
3680 return NULL_RTX;
3685 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3686 PLUS or MINUS.
3688 Rather than test for specific cases, we do this by a brute-force method
3689 and do all possible simplifications until no more changes occur. Then
3690 we rebuild the operation. */
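/* For example, (plus (minus A B) (plus (neg A) C)) is flattened into the terms +A, -B, -A and +C; the +A and -A terms cancel, and the remaining terms are rebuilt as (minus C B).  */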
3692 struct simplify_plus_minus_op_data
3694 rtx op;
3695 short neg;
3698 static bool
3699 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3701 int result;
3703 result = (commutative_operand_precedence (y)
3704 - commutative_operand_precedence (x));
3705 if (result)
3706 return result > 0;
3708 /* Group together equal REGs to do more simplification. */
3709 if (REG_P (x) && REG_P (y))
3710 return REGNO (x) > REGNO (y);
3711 else
3712 return false;
3715 static rtx
3716 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3717 rtx op1)
3719 struct simplify_plus_minus_op_data ops[8];
3720 rtx result, tem;
3721 int n_ops = 2, input_ops = 2;
3722 int changed, n_constants = 0, canonicalized = 0;
3723 int i, j;
3725 memset (ops, 0, sizeof ops);
3727 /* Set up the two operands and then expand them until nothing has been
3728 changed. If we run out of room in our array, give up; this should
3729 almost never happen. */
3731 ops[0].op = op0;
3732 ops[0].neg = 0;
3733 ops[1].op = op1;
3734 ops[1].neg = (code == MINUS);
3738 changed = 0;
3740 for (i = 0; i < n_ops; i++)
3742 rtx this_op = ops[i].op;
3743 int this_neg = ops[i].neg;
3744 enum rtx_code this_code = GET_CODE (this_op);
3746 switch (this_code)
3748 case PLUS:
3749 case MINUS:
3750 if (n_ops == 7)
3751 return NULL_RTX;
3753 ops[n_ops].op = XEXP (this_op, 1);
3754 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3755 n_ops++;
3757 ops[i].op = XEXP (this_op, 0);
3758 input_ops++;
3759 changed = 1;
3760 canonicalized |= this_neg;
3761 break;
3763 case NEG:
3764 ops[i].op = XEXP (this_op, 0);
3765 ops[i].neg = ! this_neg;
3766 changed = 1;
3767 canonicalized = 1;
3768 break;
3770 case CONST:
3771 if (n_ops < 7
3772 && GET_CODE (XEXP (this_op, 0)) == PLUS
3773 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3774 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3776 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3777 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3778 ops[n_ops].neg = this_neg;
3779 n_ops++;
3780 changed = 1;
3781 canonicalized = 1;
3783 break;
3785 case NOT:
3786 /* ~a -> (-a - 1) */
3787 if (n_ops != 7)
3789 ops[n_ops].op = constm1_rtx;
3790 ops[n_ops++].neg = this_neg;
3791 ops[i].op = XEXP (this_op, 0);
3792 ops[i].neg = !this_neg;
3793 changed = 1;
3794 canonicalized = 1;
3796 break;
3798 case CONST_INT:
3799 n_constants++;
3800 if (this_neg)
3802 ops[i].op = neg_const_int (mode, this_op);
3803 ops[i].neg = 0;
3804 changed = 1;
3805 canonicalized = 1;
3807 break;
3809 default:
3810 break;
3814 while (changed);
3816 if (n_constants > 1)
3817 canonicalized = 1;
3819 gcc_assert (n_ops >= 2);
3821 /* If we only have two operands, we can avoid the loops. */
3822 if (n_ops == 2)
3824 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3825 rtx lhs, rhs;
3827 /* Get the two operands. Be careful with the order, especially for
3828 the cases where code == MINUS. */
3829 if (ops[0].neg && ops[1].neg)
3831 lhs = gen_rtx_NEG (mode, ops[0].op);
3832 rhs = ops[1].op;
3834 else if (ops[0].neg)
3836 lhs = ops[1].op;
3837 rhs = ops[0].op;
3839 else
3841 lhs = ops[0].op;
3842 rhs = ops[1].op;
3845 return simplify_const_binary_operation (code, mode, lhs, rhs);
3848 /* Now simplify each pair of operands until nothing changes. */
3851 /* Insertion sort is good enough for an eight-element array. */
3852 for (i = 1; i < n_ops; i++)
3854 struct simplify_plus_minus_op_data save;
3855 j = i - 1;
3856 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3857 continue;
3859 canonicalized = 1;
3860 save = ops[i];
3862 ops[j + 1] = ops[j];
3863 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3864 ops[j + 1] = save;
3867 changed = 0;
3868 for (i = n_ops - 1; i > 0; i--)
3869 for (j = i - 1; j >= 0; j--)
3871 rtx lhs = ops[j].op, rhs = ops[i].op;
3872 int lneg = ops[j].neg, rneg = ops[i].neg;
3874 if (lhs != 0 && rhs != 0)
3876 enum rtx_code ncode = PLUS;
3878 if (lneg != rneg)
3880 ncode = MINUS;
3881 if (lneg)
3882 tem = lhs, lhs = rhs, rhs = tem;
3884 else if (swap_commutative_operands_p (lhs, rhs))
3885 tem = lhs, lhs = rhs, rhs = tem;
3887 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3888 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3890 rtx tem_lhs, tem_rhs;
3892 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3893 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3894 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3896 if (tem && !CONSTANT_P (tem))
3897 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3899 else
3900 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3902 /* Reject "simplifications" that just wrap the two
3903 arguments in a CONST. Failure to do so can result
3904 in infinite recursion with simplify_binary_operation
3905 when it calls us to simplify CONST operations. */
3906 if (tem
3907 && ! (GET_CODE (tem) == CONST
3908 && GET_CODE (XEXP (tem, 0)) == ncode
3909 && XEXP (XEXP (tem, 0), 0) == lhs
3910 && XEXP (XEXP (tem, 0), 1) == rhs))
3912 lneg &= rneg;
3913 if (GET_CODE (tem) == NEG)
3914 tem = XEXP (tem, 0), lneg = !lneg;
3915 if (CONST_INT_P (tem) && lneg)
3916 tem = neg_const_int (mode, tem), lneg = 0;
3918 ops[i].op = tem;
3919 ops[i].neg = lneg;
3920 ops[j].op = NULL_RTX;
3921 changed = 1;
3922 canonicalized = 1;
3927 /* If nothing changed, fail. */
3928 if (!canonicalized)
3929 return NULL_RTX;
3931 /* Pack all the operands to the lower-numbered entries. */
3932 for (i = 0, j = 0; j < n_ops; j++)
3933 if (ops[j].op)
3935 ops[i] = ops[j];
3936 i++;
3938 n_ops = i;
3940 while (changed);
3942 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3943 if (n_ops == 2
3944 && CONST_INT_P (ops[1].op)
3945 && CONSTANT_P (ops[0].op)
3946 && ops[0].neg)
3947 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3949 /* We suppressed creation of trivial CONST expressions in the
3950 combination loop to avoid recursion. Create one manually now.
3951 The combination loop should have ensured that there is exactly
3952 one CONST_INT, and the sort will have ensured that it is last
3953 in the array and that any other constant will be next-to-last. */
3955 if (n_ops > 1
3956 && CONST_INT_P (ops[n_ops - 1].op)
3957 && CONSTANT_P (ops[n_ops - 2].op))
3959 rtx value = ops[n_ops - 1].op;
3960 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3961 value = neg_const_int (mode, value);
3962 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3963 n_ops--;
3966 /* Put a non-negated operand first, if possible. */
3968 for (i = 0; i < n_ops && ops[i].neg; i++)
3969 continue;
3970 if (i == n_ops)
3971 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3972 else if (i != 0)
3974 tem = ops[0].op;
3975 ops[0] = ops[i];
3976 ops[i].op = tem;
3977 ops[i].neg = 1;
3980 /* Now make the result by performing the requested operations. */
3981 result = ops[0].op;
3982 for (i = 1; i < n_ops; i++)
3983 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3984 mode, result, ops[i].op);
3986 return result;
3989 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3990 static bool
3991 plus_minus_operand_p (const_rtx x)
3993 return GET_CODE (x) == PLUS
3994 || GET_CODE (x) == MINUS
3995 || (GET_CODE (x) == CONST
3996 && GET_CODE (XEXP (x, 0)) == PLUS
3997 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3998 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
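/* Illustrative examples: the predicate above accepts (plus x y),
   (minus x y) and (const (plus (symbol_ref foo) (const_int 4))), but
   rejects a bare (symbol_ref foo) or (const_int 4).  */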
4001 /* Like simplify_binary_operation except used for relational operators.
4002 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4003 not both be VOIDmode as well.
4005 CMP_MODE specifies the mode in which the comparison is done, so it is
4006 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4007 the operands or, if both are VOIDmode, the operands are compared in
4008 "infinite precision". */
4010 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4011 enum machine_mode cmp_mode, rtx op0, rtx op1)
4013 rtx tem, trueop0, trueop1;
4015 if (cmp_mode == VOIDmode)
4016 cmp_mode = GET_MODE (op0);
4017 if (cmp_mode == VOIDmode)
4018 cmp_mode = GET_MODE (op1);
4020 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4021 if (tem)
4023 if (SCALAR_FLOAT_MODE_P (mode))
4025 if (tem == const0_rtx)
4026 return CONST0_RTX (mode);
4027 #ifdef FLOAT_STORE_FLAG_VALUE
4029 REAL_VALUE_TYPE val;
4030 val = FLOAT_STORE_FLAG_VALUE (mode);
4031 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4033 #else
4034 return NULL_RTX;
4035 #endif
4037 if (VECTOR_MODE_P (mode))
4039 if (tem == const0_rtx)
4040 return CONST0_RTX (mode);
4041 #ifdef VECTOR_STORE_FLAG_VALUE
4043 int i, units;
4044 rtvec v;
4046 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4047 if (val == NULL_RTX)
4048 return NULL_RTX;
4049 if (val == const1_rtx)
4050 return CONST1_RTX (mode);
4052 units = GET_MODE_NUNITS (mode);
4053 v = rtvec_alloc (units);
4054 for (i = 0; i < units; i++)
4055 RTVEC_ELT (v, i) = val;
4056 return gen_rtx_raw_CONST_VECTOR (mode, v);
4058 #else
4059 return NULL_RTX;
4060 #endif
4063 return tem;
4066 /* For the following tests, ensure const0_rtx is op1. */
4067 if (swap_commutative_operands_p (op0, op1)
4068 || (op0 == const0_rtx && op1 != const0_rtx))
4069 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4071 /* If op0 is a compare, extract the comparison arguments from it. */
4072 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4073 return simplify_gen_relational (code, mode, VOIDmode,
4074 XEXP (op0, 0), XEXP (op0, 1));
4076 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4077 || CC0_P (op0))
4078 return NULL_RTX;
4080 trueop0 = avoid_constant_pool_reference (op0);
4081 trueop1 = avoid_constant_pool_reference (op1);
4082 return simplify_relational_operation_1 (code, mode, cmp_mode,
4083 trueop0, trueop1);
4086 /* This part of simplify_relational_operation is only used when CMP_MODE
4087 is not in class MODE_CC (i.e. it is a real comparison).
4089 MODE is the mode of the result, while CMP_MODE specifies the mode
4090 in which the comparison is done, so it is the mode of the operands. */
4092 static rtx
4093 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4094 enum machine_mode cmp_mode, rtx op0, rtx op1)
4096 enum rtx_code op0code = GET_CODE (op0);
4098 if (op1 == const0_rtx && COMPARISON_P (op0))
4100 /* If op0 is a comparison, extract the comparison arguments
4101 from it. */
4102 if (code == NE)
4104 if (GET_MODE (op0) == mode)
4105 return simplify_rtx (op0);
4106 else
4107 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4108 XEXP (op0, 0), XEXP (op0, 1));
4110 else if (code == EQ)
4112 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4113 if (new_code != UNKNOWN)
4114 return simplify_gen_relational (new_code, mode, VOIDmode,
4115 XEXP (op0, 0), XEXP (op0, 1));
4119 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4120 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4121 if ((code == LTU || code == GEU)
4122 && GET_CODE (op0) == PLUS
4123 && CONST_INT_P (XEXP (op0, 1))
4124 && (rtx_equal_p (op1, XEXP (op0, 0))
4125 || rtx_equal_p (op1, XEXP (op0, 1))))
4127 rtx new_cmp
4128 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4129 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4130 cmp_mode, XEXP (op0, 0), new_cmp);
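/* Worked example (illustrative, SImode): (ltu (plus a 1) 1) holds
   exactly when the addition wraps, i.e. when a is 0xffffffff, which
   is the same as (geu a (const_int -1)).  */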
4133 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4134 if ((code == LTU || code == GEU)
4135 && GET_CODE (op0) == PLUS
4136 && rtx_equal_p (op1, XEXP (op0, 1))
4137 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4138 && !rtx_equal_p (op1, XEXP (op0, 0)))
4139 return simplify_gen_relational (code, mode, cmp_mode, op0,
4140 copy_rtx (XEXP (op0, 0)));
4142 if (op1 == const0_rtx)
4144 /* Canonicalize (GTU x 0) as (NE x 0). */
4145 if (code == GTU)
4146 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4147 /* Canonicalize (LEU x 0) as (EQ x 0). */
4148 if (code == LEU)
4149 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4151 else if (op1 == const1_rtx)
4153 switch (code)
4155 case GE:
4156 /* Canonicalize (GE x 1) as (GT x 0). */
4157 return simplify_gen_relational (GT, mode, cmp_mode,
4158 op0, const0_rtx);
4159 case GEU:
4160 /* Canonicalize (GEU x 1) as (NE x 0). */
4161 return simplify_gen_relational (NE, mode, cmp_mode,
4162 op0, const0_rtx);
4163 case LT:
4164 /* Canonicalize (LT x 1) as (LE x 0). */
4165 return simplify_gen_relational (LE, mode, cmp_mode,
4166 op0, const0_rtx);
4167 case LTU:
4168 /* Canonicalize (LTU x 1) as (EQ x 0). */
4169 return simplify_gen_relational (EQ, mode, cmp_mode,
4170 op0, const0_rtx);
4171 default:
4172 break;
4175 else if (op1 == constm1_rtx)
4177 /* Canonicalize (LE x -1) as (LT x 0). */
4178 if (code == LE)
4179 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4180 /* Canonicalize (GT x -1) as (GE x 0). */
4181 if (code == GT)
4182 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4185 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4186 if ((code == EQ || code == NE)
4187 && (op0code == PLUS || op0code == MINUS)
4188 && CONSTANT_P (op1)
4189 && CONSTANT_P (XEXP (op0, 1))
4190 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4192 rtx x = XEXP (op0, 0);
4193 rtx c = XEXP (op0, 1);
4195 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4196 cmp_mode, op1, c);
4197 return simplify_gen_relational (code, mode, cmp_mode, x, c);
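/* E.g. (illustrative): (eq (plus x (const_int 3)) (const_int 10))
   becomes (eq x (const_int 7)), and (ne (minus x (const_int 3))
   (const_int 10)) becomes (ne x (const_int 13)).  */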
4200 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4201 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4202 if (code == NE
4203 && op1 == const0_rtx
4204 && GET_MODE_CLASS (mode) == MODE_INT
4205 && cmp_mode != VOIDmode
4206 /* ??? Work-around BImode bugs in the ia64 backend. */
4207 && mode != BImode
4208 && cmp_mode != BImode
4209 && nonzero_bits (op0, cmp_mode) == 1
4210 && STORE_FLAG_VALUE == 1)
4211 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4212 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4213 : lowpart_subreg (mode, op0, cmp_mode);
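/* Example (illustrative, assuming STORE_FLAG_VALUE == 1): if op0 is a
   QImode value known to be 0 or 1, then (ne:SI op0 (const_int 0)) is
   just (zero_extend:SI op0), since the flag result equals op0 itself.  */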
4215 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4216 if ((code == EQ || code == NE)
4217 && op1 == const0_rtx
4218 && op0code == XOR)
4219 return simplify_gen_relational (code, mode, cmp_mode,
4220 XEXP (op0, 0), XEXP (op0, 1));
4222 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4223 if ((code == EQ || code == NE)
4224 && op0code == XOR
4225 && rtx_equal_p (XEXP (op0, 0), op1)
4226 && !side_effects_p (XEXP (op0, 0)))
4227 return simplify_gen_relational (code, mode, cmp_mode,
4228 XEXP (op0, 1), const0_rtx);
4230 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4231 if ((code == EQ || code == NE)
4232 && op0code == XOR
4233 && rtx_equal_p (XEXP (op0, 1), op1)
4234 && !side_effects_p (XEXP (op0, 1)))
4235 return simplify_gen_relational (code, mode, cmp_mode,
4236 XEXP (op0, 0), const0_rtx);
4238 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4239 if ((code == EQ || code == NE)
4240 && op0code == XOR
4241 && (CONST_INT_P (op1)
4242 || GET_CODE (op1) == CONST_DOUBLE)
4243 && (CONST_INT_P (XEXP (op0, 1))
4244 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4245 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4246 simplify_gen_binary (XOR, cmp_mode,
4247 XEXP (op0, 1), op1));
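/* E.g. (illustrative): (eq (xor x (const_int 5)) (const_int 3))
   becomes (eq x (const_int 6)), since 5 ^ 3 == 6.  */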
4249 if (op0code == POPCOUNT && op1 == const0_rtx)
4250 switch (code)
4252 case EQ:
4253 case LE:
4254 case LEU:
4255 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4256 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4257 XEXP (op0, 0), const0_rtx);
4259 case NE:
4260 case GT:
4261 case GTU:
4262 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4263 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4264 XEXP (op0, 0), const0_rtx);
4266 default:
4267 break;
4270 return NULL_RTX;
4273 enum
4275 CMP_EQ = 1,
4276 CMP_LT = 2,
4277 CMP_GT = 4,
4278 CMP_LTU = 8,
4279 CMP_GTU = 16
4283 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4284 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4285 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4286 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4287 For floating-point comparisons, assume that the operands were ordered. */
4289 static rtx
4290 comparison_result (enum rtx_code code, int known_results)
4292 switch (code)
4294 case EQ:
4295 case UNEQ:
4296 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4297 case NE:
4298 case LTGT:
4299 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4301 case LT:
4302 case UNLT:
4303 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4304 case GE:
4305 case UNGE:
4306 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4308 case GT:
4309 case UNGT:
4310 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4311 case LE:
4312 case UNLE:
4313 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4315 case LTU:
4316 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4317 case GEU:
4318 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4320 case GTU:
4321 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4322 case LEU:
4323 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4325 case ORDERED:
4326 return const_true_rtx;
4327 case UNORDERED:
4328 return const0_rtx;
4329 default:
4330 gcc_unreachable ();
4334 /* Check if the given comparison (done in the given MODE) is actually a
4335 tautology or a contradiction.
4336 If no simplification is possible, this function returns zero.
4337 Otherwise, it returns either const_true_rtx or const0_rtx. */
4340 simplify_const_relational_operation (enum rtx_code code,
4341 enum machine_mode mode,
4342 rtx op0, rtx op1)
4344 rtx tem;
4345 rtx trueop0;
4346 rtx trueop1;
4348 gcc_assert (mode != VOIDmode
4349 || (GET_MODE (op0) == VOIDmode
4350 && GET_MODE (op1) == VOIDmode));
4352 /* If op0 is a compare, extract the comparison arguments from it. */
4353 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4355 op1 = XEXP (op0, 1);
4356 op0 = XEXP (op0, 0);
4358 if (GET_MODE (op0) != VOIDmode)
4359 mode = GET_MODE (op0);
4360 else if (GET_MODE (op1) != VOIDmode)
4361 mode = GET_MODE (op1);
4362 else
4363 return 0;
4366 /* We can't simplify MODE_CC values since we don't know what the
4367 actual comparison is. */
4368 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4369 return 0;
4371 /* Make sure the constant is second. */
4372 if (swap_commutative_operands_p (op0, op1))
4374 tem = op0, op0 = op1, op1 = tem;
4375 code = swap_condition (code);
4378 trueop0 = avoid_constant_pool_reference (op0);
4379 trueop1 = avoid_constant_pool_reference (op1);
4381 /* For integer comparisons of A and B maybe we can simplify A - B and can
4382 then simplify a comparison of that with zero. If A and B are both either
4383 a register or a CONST_INT, this can't help; testing for these cases will
4384 prevent infinite recursion here and speed things up.
4386 We can only do this for EQ and NE comparisons as otherwise we may
4387 lose or introduce overflow which we cannot disregard as undefined, since
4388 we do not know the signedness of the operation on either the left or
4389 the right hand side of the comparison. */
4391 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4392 && (code == EQ || code == NE)
4393 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4394 && (REG_P (op1) || CONST_INT_P (trueop1)))
4395 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4396 /* We cannot do this if tem is a nonzero address. */
4397 && ! nonzero_address_p (tem))
4398 return simplify_const_relational_operation (signed_condition (code),
4399 mode, tem, const0_rtx);
4401 if (! HONOR_NANS (mode) && code == ORDERED)
4402 return const_true_rtx;
4404 if (! HONOR_NANS (mode) && code == UNORDERED)
4405 return const0_rtx;
4407 /* For modes without NaNs, if the two operands are equal, we know the
4408 result except if they have side-effects. Even with NaNs we know
4409 the result of unordered comparisons and, if signaling NaNs are
4410 irrelevant, also the result of LT/GT/LTGT. */
4411 if ((! HONOR_NANS (GET_MODE (trueop0))
4412 || code == UNEQ || code == UNLE || code == UNGE
4413 || ((code == LT || code == GT || code == LTGT)
4414 && ! HONOR_SNANS (GET_MODE (trueop0))))
4415 && rtx_equal_p (trueop0, trueop1)
4416 && ! side_effects_p (trueop0))
4417 return comparison_result (code, CMP_EQ);
4419 /* If the operands are floating-point constants, see if we can fold
4420 the result. */
4421 if (GET_CODE (trueop0) == CONST_DOUBLE
4422 && GET_CODE (trueop1) == CONST_DOUBLE
4423 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4425 REAL_VALUE_TYPE d0, d1;
4427 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4428 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4430 /* Comparisons are unordered iff at least one of the values is NaN. */
4431 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4432 switch (code)
4434 case UNEQ:
4435 case UNLT:
4436 case UNGT:
4437 case UNLE:
4438 case UNGE:
4439 case NE:
4440 case UNORDERED:
4441 return const_true_rtx;
4442 case EQ:
4443 case LT:
4444 case GT:
4445 case LE:
4446 case GE:
4447 case LTGT:
4448 case ORDERED:
4449 return const0_rtx;
4450 default:
4451 return 0;
4454 return comparison_result (code,
4455 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4456 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4459 /* Otherwise, see if the operands are both integers. */
4460 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4461 && (GET_CODE (trueop0) == CONST_DOUBLE
4462 || CONST_INT_P (trueop0))
4463 && (GET_CODE (trueop1) == CONST_DOUBLE
4464 || CONST_INT_P (trueop1)))
4466 int width = GET_MODE_BITSIZE (mode);
4467 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4468 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4470 /* Get the two words comprising each integer constant. */
4471 if (GET_CODE (trueop0) == CONST_DOUBLE)
4473 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4474 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4476 else
4478 l0u = l0s = INTVAL (trueop0);
4479 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4482 if (GET_CODE (trueop1) == CONST_DOUBLE)
4484 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4485 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4487 else
4489 l1u = l1s = INTVAL (trueop1);
4490 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4493 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4494 we have to sign or zero-extend the values. */
4495 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4497 l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4498 l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4500 if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4501 l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4503 if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
4504 l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
4506 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4507 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4509 if (h0u == h1u && l0u == l1u)
4510 return comparison_result (code, CMP_EQ);
4511 else
4513 int cr;
4514 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4515 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4516 return comparison_result (code, cr);
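/* Worked example (illustrative, SImode): for trueop0 = -1 and
   trueop1 = 1 the signed test yields CMP_LT while the unsigned test
   yields CMP_GTU (0xffffffff > 1), so (lt -1 1) folds to
   const_true_rtx and (ltu -1 1) folds to const0_rtx.  */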
4520 /* Optimize comparisons with upper and lower bounds. */
4521 if (SCALAR_INT_MODE_P (mode)
4522 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4523 && CONST_INT_P (trueop1))
4525 int sign;
4526 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4527 HOST_WIDE_INT val = INTVAL (trueop1);
4528 HOST_WIDE_INT mmin, mmax;
4530 if (code == GEU
4531 || code == LEU
4532 || code == GTU
4533 || code == LTU)
4534 sign = 0;
4535 else
4536 sign = 1;
4538 /* Get a reduced range if the sign bit is zero. */
4539 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4541 mmin = 0;
4542 mmax = nonzero;
4544 else
4546 rtx mmin_rtx, mmax_rtx;
4547 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4549 mmin = INTVAL (mmin_rtx);
4550 mmax = INTVAL (mmax_rtx);
4551 if (sign)
4553 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4555 mmin >>= (sign_copies - 1);
4556 mmax >>= (sign_copies - 1);
4560 switch (code)
4562 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4563 case GEU:
4564 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4565 return const_true_rtx;
4566 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4567 return const0_rtx;
4568 break;
4569 case GE:
4570 if (val <= mmin)
4571 return const_true_rtx;
4572 if (val > mmax)
4573 return const0_rtx;
4574 break;
4576 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4577 case LEU:
4578 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4579 return const_true_rtx;
4580 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4581 return const0_rtx;
4582 break;
4583 case LE:
4584 if (val >= mmax)
4585 return const_true_rtx;
4586 if (val < mmin)
4587 return const0_rtx;
4588 break;
4590 case EQ:
4591 /* x == y is always false for y out of range. */
4592 if (val < mmin || val > mmax)
4593 return const0_rtx;
4594 break;
4596 /* x > y is always false for y >= mmax, always true for y < mmin. */
4597 case GTU:
4598 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4599 return const0_rtx;
4600 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4601 return const_true_rtx;
4602 break;
4603 case GT:
4604 if (val >= mmax)
4605 return const0_rtx;
4606 if (val < mmin)
4607 return const_true_rtx;
4608 break;
4610 /* x < y is always false for y <= mmin, always true for y > mmax. */
4611 case LTU:
4612 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4613 return const0_rtx;
4614 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4615 return const_true_rtx;
4616 break;
4617 case LT:
4618 if (val <= mmin)
4619 return const0_rtx;
4620 if (val > mmax)
4621 return const_true_rtx;
4622 break;
4624 case NE:
4625 /* x != y is always true for y out of range. */
4626 if (val < mmin || val > mmax)
4627 return const_true_rtx;
4628 break;
4630 default:
4631 break;
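/* Example of the bounds check (illustrative): a QImode operand has
   unsigned range [0, 255], so a GTU comparison against a value at or
   above 255 folds to const0_rtx and the corresponding LEU comparison
   folds to const_true_rtx.  */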
4635 /* Optimize integer comparisons with zero. */
4636 if (trueop1 == const0_rtx)
4638 /* Some addresses are known to be nonzero. We don't know
4639 their sign, but equality comparisons are known. */
4640 if (nonzero_address_p (trueop0))
4642 if (code == EQ || code == LEU)
4643 return const0_rtx;
4644 if (code == NE || code == GTU)
4645 return const_true_rtx;
4648 /* See if the first operand is an IOR with a constant. If so, we
4649 may be able to determine the result of this comparison. */
4650 if (GET_CODE (op0) == IOR)
4652 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4653 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4655 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4656 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4657 && (UINTVAL (inner_const)
4658 & ((unsigned HOST_WIDE_INT) 1
4659 << sign_bitnum)));
4661 switch (code)
4663 case EQ:
4664 case LEU:
4665 return const0_rtx;
4666 case NE:
4667 case GTU:
4668 return const_true_rtx;
4669 case LT:
4670 case LE:
4671 if (has_sign)
4672 return const_true_rtx;
4673 break;
4674 case GT:
4675 case GE:
4676 if (has_sign)
4677 return const0_rtx;
4678 break;
4679 default:
4680 break;
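/* Example (illustrative): if op0 is (ior y (const_int 4)) it is known
   to be nonzero, so (eq op0 0) folds to const0_rtx and (ne op0 0) to
   const_true_rtx; if the constant also has the sign bit set, (lt op0 0)
   folds to const_true_rtx as well.  */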
4686 /* Optimize comparison of ABS with zero. */
4687 if (trueop1 == CONST0_RTX (mode)
4688 && (GET_CODE (trueop0) == ABS
4689 || (GET_CODE (trueop0) == FLOAT_EXTEND
4690 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4692 switch (code)
4694 case LT:
4695 /* Optimize abs(x) < 0.0. */
4696 if (!HONOR_SNANS (mode)
4697 && (!INTEGRAL_MODE_P (mode)
4698 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4700 if (INTEGRAL_MODE_P (mode)
4701 && (issue_strict_overflow_warning
4702 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4703 warning (OPT_Wstrict_overflow,
4704 ("assuming signed overflow does not occur when "
4705 "assuming abs (x) < 0 is false"));
4706 return const0_rtx;
4708 break;
4710 case GE:
4711 /* Optimize abs(x) >= 0.0. */
4712 if (!HONOR_NANS (mode)
4713 && (!INTEGRAL_MODE_P (mode)
4714 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4716 if (INTEGRAL_MODE_P (mode)
4717 && (issue_strict_overflow_warning
4718 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4719 warning (OPT_Wstrict_overflow,
4720 ("assuming signed overflow does not occur when "
4721 "assuming abs (x) >= 0 is true"));
4722 return const_true_rtx;
4724 break;
4726 case UNGE:
4727 /* Optimize ! (abs(x) < 0.0). */
4728 return const_true_rtx;
4730 default:
4731 break;
4735 return 0;
4738 /* Simplify CODE, an operation with result mode MODE and three operands,
4739 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4740 a constant. Return 0 if no simplification is possible. */
4743 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4744 enum machine_mode op0_mode, rtx op0, rtx op1,
4745 rtx op2)
4747 unsigned int width = GET_MODE_BITSIZE (mode);
4748 bool any_change = false;
4749 rtx tem;
4751 /* VOIDmode means "infinite" precision. */
4752 if (width == 0)
4753 width = HOST_BITS_PER_WIDE_INT;
4755 switch (code)
4757 case FMA:
4758 /* Simplify negations around the multiplication. */
4759 /* -a * -b + c => a * b + c. */
4760 if (GET_CODE (op0) == NEG)
4762 tem = simplify_unary_operation (NEG, mode, op1, mode);
4763 if (tem)
4764 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4766 else if (GET_CODE (op1) == NEG)
4768 tem = simplify_unary_operation (NEG, mode, op0, mode);
4769 if (tem)
4770 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4773 /* Canonicalize the two multiplication operands. */
4774 /* a * -b + c => -b * a + c. */
4775 if (swap_commutative_operands_p (op0, op1))
4776 tem = op0, op0 = op1, op1 = tem, any_change = true;
4778 if (any_change)
4779 return gen_rtx_FMA (mode, op0, op1, op2);
4780 return NULL_RTX;
4782 case SIGN_EXTRACT:
4783 case ZERO_EXTRACT:
4784 if (CONST_INT_P (op0)
4785 && CONST_INT_P (op1)
4786 && CONST_INT_P (op2)
4787 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4788 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4790 /* Extracting a bit-field from a constant */
4791 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4793 if (BITS_BIG_ENDIAN)
4794 val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
4795 else
4796 val >>= INTVAL (op2);
4798 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4800 /* First zero-extend. */
4801 val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4802 /* If desired, propagate sign bit. */
4803 if (code == SIGN_EXTRACT
4804 && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
4805 != 0)
4806 val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4809 /* Clear the bits that don't belong in our mode,
4810 unless they and our sign bit are all one.
4811 So we get either a reasonable negative value or a reasonable
4812 unsigned value for this mode. */
4813 if (width < HOST_BITS_PER_WIDE_INT
4814 && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
4815 != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
4816 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
4818 return gen_int_mode (val, mode);
4820 break;
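/* Worked example (illustrative, assuming !BITS_BIG_ENDIAN): extracting
   4 bits at position 4 from (const_int 0xa5) selects the nibble 0xa,
   so ZERO_EXTRACT yields 10 while SIGN_EXTRACT, seeing bit 3 of the
   field set, yields -6.  */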
4822 case IF_THEN_ELSE:
4823 if (CONST_INT_P (op0))
4824 return op0 != const0_rtx ? op1 : op2;
4826 /* Convert c ? a : a into "a". */
4827 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4828 return op1;
4830 /* Convert a != b ? a : b into "a". */
4831 if (GET_CODE (op0) == NE
4832 && ! side_effects_p (op0)
4833 && ! HONOR_NANS (mode)
4834 && ! HONOR_SIGNED_ZEROS (mode)
4835 && ((rtx_equal_p (XEXP (op0, 0), op1)
4836 && rtx_equal_p (XEXP (op0, 1), op2))
4837 || (rtx_equal_p (XEXP (op0, 0), op2)
4838 && rtx_equal_p (XEXP (op0, 1), op1))))
4839 return op1;
4841 /* Convert a == b ? a : b into "b". */
4842 if (GET_CODE (op0) == EQ
4843 && ! side_effects_p (op0)
4844 && ! HONOR_NANS (mode)
4845 && ! HONOR_SIGNED_ZEROS (mode)
4846 && ((rtx_equal_p (XEXP (op0, 0), op1)
4847 && rtx_equal_p (XEXP (op0, 1), op2))
4848 || (rtx_equal_p (XEXP (op0, 0), op2)
4849 && rtx_equal_p (XEXP (op0, 1), op1))))
4850 return op2;
4852 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4854 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4855 ? GET_MODE (XEXP (op0, 1))
4856 : GET_MODE (XEXP (op0, 0)));
4857 rtx temp;
4859 /* Look for happy constants in op1 and op2. */
4860 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4862 HOST_WIDE_INT t = INTVAL (op1);
4863 HOST_WIDE_INT f = INTVAL (op2);
4865 if (t == STORE_FLAG_VALUE && f == 0)
4866 code = GET_CODE (op0);
4867 else if (t == 0 && f == STORE_FLAG_VALUE)
4869 enum rtx_code tmp;
4870 tmp = reversed_comparison_code (op0, NULL_RTX);
4871 if (tmp == UNKNOWN)
4872 break;
4873 code = tmp;
4875 else
4876 break;
4878 return simplify_gen_relational (code, mode, cmp_mode,
4879 XEXP (op0, 0), XEXP (op0, 1));
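/* E.g. (illustrative, assuming STORE_FLAG_VALUE == 1):
   (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to
   (lt x y); with the arms swapped, the reversed comparison (ge x y)
   is used instead, when that reversal is known.  */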
4882 if (cmp_mode == VOIDmode)
4883 cmp_mode = op0_mode;
4884 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4885 cmp_mode, XEXP (op0, 0),
4886 XEXP (op0, 1));
4888 /* See if any simplifications were possible. */
4889 if (temp)
4891 if (CONST_INT_P (temp))
4892 return temp == const0_rtx ? op2 : op1;
4893 else if (temp)
4894 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4897 break;
4899 case VEC_MERGE:
4900 gcc_assert (GET_MODE (op0) == mode);
4901 gcc_assert (GET_MODE (op1) == mode);
4902 gcc_assert (VECTOR_MODE_P (mode));
4903 op2 = avoid_constant_pool_reference (op2);
4904 if (CONST_INT_P (op2))
4906 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4907 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4908 int mask = (1 << n_elts) - 1;
4910 if (!(INTVAL (op2) & mask))
4911 return op1;
4912 if ((INTVAL (op2) & mask) == mask)
4913 return op0;
4915 op0 = avoid_constant_pool_reference (op0);
4916 op1 = avoid_constant_pool_reference (op1);
4917 if (GET_CODE (op0) == CONST_VECTOR
4918 && GET_CODE (op1) == CONST_VECTOR)
4920 rtvec v = rtvec_alloc (n_elts);
4921 unsigned int i;
4923 for (i = 0; i < n_elts; i++)
4924 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4925 ? CONST_VECTOR_ELT (op0, i)
4926 : CONST_VECTOR_ELT (op1, i));
4927 return gen_rtx_CONST_VECTOR (mode, v);
4930 break;
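/* Example (illustrative, V4SImode): a mask of (const_int 5) selects
   elements 0 and 2 from op0 and elements 1 and 3 from op1, while
   masks of 0 and 0xf reduce the VEC_MERGE to op1 and op0
   respectively.  */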
4932 default:
4933 gcc_unreachable ();
4936 return 0;
4939 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4940 or CONST_VECTOR,
4941 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4943 Works by unpacking OP into a collection of 8-bit values
4944 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4945 and then repacking them again for OUTERMODE. */
4947 static rtx
4948 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4949 enum machine_mode innermode, unsigned int byte)
4951 /* We support up to 512-bit values (for V8DFmode). */
4952 enum {
4953 max_bitsize = 512,
4954 value_bit = 8,
4955 value_mask = (1 << value_bit) - 1
4957 unsigned char value[max_bitsize / value_bit];
4958 int value_start;
4959 int i;
4960 int elem;
4962 int num_elem;
4963 rtx * elems;
4964 int elem_bitsize;
4965 rtx result_s;
4966 rtvec result_v = NULL;
4967 enum mode_class outer_class;
4968 enum machine_mode outer_submode;
4970 /* Some ports misuse CCmode. */
4971 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4972 return op;
4974 /* We have no way to represent a complex constant at the rtl level. */
4975 if (COMPLEX_MODE_P (outermode))
4976 return NULL_RTX;
4978 /* Unpack the value. */
4980 if (GET_CODE (op) == CONST_VECTOR)
4982 num_elem = CONST_VECTOR_NUNITS (op);
4983 elems = &CONST_VECTOR_ELT (op, 0);
4984 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4986 else
4988 num_elem = 1;
4989 elems = &op;
4990 elem_bitsize = max_bitsize;
4992 /* If this asserts, it is too complicated; reducing value_bit may help. */
4993 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4994 /* I don't know how to handle endianness of sub-units. */
4995 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4997 for (elem = 0; elem < num_elem; elem++)
4999 unsigned char * vp;
5000 rtx el = elems[elem];
5002 /* Vectors are kept in target memory order. (This is probably
5003 a mistake.) */
5005 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5006 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5007 / BITS_PER_UNIT);
5008 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5009 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5010 unsigned bytele = (subword_byte % UNITS_PER_WORD
5011 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5012 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5015 switch (GET_CODE (el))
5017 case CONST_INT:
5018 for (i = 0;
5019 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5020 i += value_bit)
5021 *vp++ = INTVAL (el) >> i;
5022 /* CONST_INTs are always logically sign-extended. */
5023 for (; i < elem_bitsize; i += value_bit)
5024 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5025 break;
5027 case CONST_DOUBLE:
5028 if (GET_MODE (el) == VOIDmode)
5030 /* If this triggers, someone should have generated a
5031 CONST_INT instead. */
5032 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5034 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5035 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5036 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5038 *vp++
5039 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5040 i += value_bit;
5042 /* It shouldn't matter what's done here, so fill it with
5043 zero. */
5044 for (; i < elem_bitsize; i += value_bit)
5045 *vp++ = 0;
5047 else
5049 long tmp[max_bitsize / 32];
5050 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5052 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5053 gcc_assert (bitsize <= elem_bitsize);
5054 gcc_assert (bitsize % value_bit == 0);
5056 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5057 GET_MODE (el));
5059 /* real_to_target produces its result in words affected by
5060 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5061 and use WORDS_BIG_ENDIAN instead; see the documentation
5062 of SUBREG in rtl.texi. */
5063 for (i = 0; i < bitsize; i += value_bit)
5065 int ibase;
5066 if (WORDS_BIG_ENDIAN)
5067 ibase = bitsize - 1 - i;
5068 else
5069 ibase = i;
5070 *vp++ = tmp[ibase / 32] >> i % 32;
5073 /* It shouldn't matter what's done here, so fill it with
5074 zero. */
5075 for (; i < elem_bitsize; i += value_bit)
5076 *vp++ = 0;
5078 break;
5080 case CONST_FIXED:
5081 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5083 for (i = 0; i < elem_bitsize; i += value_bit)
5084 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5086 else
5088 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5089 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5090 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5091 i += value_bit)
5092 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5093 >> (i - HOST_BITS_PER_WIDE_INT);
5094 for (; i < elem_bitsize; i += value_bit)
5095 *vp++ = 0;
5097 break;
5099 default:
5100 gcc_unreachable ();
5104 /* Now, pick the right byte to start with. */
5105 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5106 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5107 will already have offset 0. */
5108 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5110 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5111 - byte);
5112 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5113 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5114 byte = (subword_byte % UNITS_PER_WORD
5115 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5118 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5119 so if it's become negative it will instead be very large.) */
5120 gcc_assert (byte < GET_MODE_SIZE (innermode));
5122 /* Convert from bytes to chunks of size value_bit. */
5123 value_start = byte * (BITS_PER_UNIT / value_bit);
5125 /* Re-pack the value. */
5127 if (VECTOR_MODE_P (outermode))
5129 num_elem = GET_MODE_NUNITS (outermode);
5130 result_v = rtvec_alloc (num_elem);
5131 elems = &RTVEC_ELT (result_v, 0);
5132 outer_submode = GET_MODE_INNER (outermode);
5134 else
5136 num_elem = 1;
5137 elems = &result_s;
5138 outer_submode = outermode;
5141 outer_class = GET_MODE_CLASS (outer_submode);
5142 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5144 gcc_assert (elem_bitsize % value_bit == 0);
5145 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5147 for (elem = 0; elem < num_elem; elem++)
5149 unsigned char *vp;
5151 /* Vectors are stored in target memory order. (This is probably
5152 a mistake.) */
5154 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5155 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5156 / BITS_PER_UNIT);
5157 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5158 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5159 unsigned bytele = (subword_byte % UNITS_PER_WORD
5160 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5161 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5164 switch (outer_class)
5166 case MODE_INT:
5167 case MODE_PARTIAL_INT:
5169 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5171 for (i = 0;
5172 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5173 i += value_bit)
5174 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5175 for (; i < elem_bitsize; i += value_bit)
5176 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5177 << (i - HOST_BITS_PER_WIDE_INT);
5179 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5180 know why. */
5181 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5182 elems[elem] = gen_int_mode (lo, outer_submode);
5183 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5184 elems[elem] = immed_double_const (lo, hi, outer_submode);
5185 else
5186 return NULL_RTX;
5188 break;
5190 case MODE_FLOAT:
5191 case MODE_DECIMAL_FLOAT:
5193 REAL_VALUE_TYPE r;
5194 long tmp[max_bitsize / 32];
5196 /* real_from_target wants its input in words affected by
5197 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5198 and use WORDS_BIG_ENDIAN instead; see the documentation
5199 of SUBREG in rtl.texi. */
5200 for (i = 0; i < max_bitsize / 32; i++)
5201 tmp[i] = 0;
5202 for (i = 0; i < elem_bitsize; i += value_bit)
5204 int ibase;
5205 if (WORDS_BIG_ENDIAN)
5206 ibase = elem_bitsize - 1 - i;
5207 else
5208 ibase = i;
5209 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5212 real_from_target (&r, tmp, outer_submode);
5213 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5215 break;
5217 case MODE_FRACT:
5218 case MODE_UFRACT:
5219 case MODE_ACCUM:
5220 case MODE_UACCUM:
5222 FIXED_VALUE_TYPE f;
5223 f.data.low = 0;
5224 f.data.high = 0;
5225 f.mode = outer_submode;
5227 for (i = 0;
5228 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5229 i += value_bit)
5230 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5231 for (; i < elem_bitsize; i += value_bit)
5232 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5233 << (i - HOST_BITS_PER_WIDE_INT));
5235 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5237 break;
5239 default:
5240 gcc_unreachable ();
5243 if (VECTOR_MODE_P (outermode))
5244 return gen_rtx_CONST_VECTOR (outermode, result_v);
5245 else
5246 return result_s;
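/* Illustrative example (little-endian target assumed): calling this
   routine with OUTERMODE = HImode, OP = (const_int 0x12345678),
   INNERMODE = SImode and BYTE = 0 unpacks the constant into the bytes
   78 56 34 12 and repacks the first two, yielding (const_int 0x5678).  */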
5249 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5250 Return 0 if no simplifications are possible. */
5252 simplify_subreg (enum machine_mode outermode, rtx op,
5253 enum machine_mode innermode, unsigned int byte)
5255 /* Little bit of sanity checking. */
5256 gcc_assert (innermode != VOIDmode);
5257 gcc_assert (outermode != VOIDmode);
5258 gcc_assert (innermode != BLKmode);
5259 gcc_assert (outermode != BLKmode);
5261 gcc_assert (GET_MODE (op) == innermode
5262 || GET_MODE (op) == VOIDmode);
5264 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5265 gcc_assert (byte < GET_MODE_SIZE (innermode));
5267 if (outermode == innermode && !byte)
5268 return op;
5270 if (CONST_INT_P (op)
5271 || GET_CODE (op) == CONST_DOUBLE
5272 || GET_CODE (op) == CONST_FIXED
5273 || GET_CODE (op) == CONST_VECTOR)
5274 return simplify_immed_subreg (outermode, op, innermode, byte);
5276 /* Changing mode twice with SUBREG => just change it once,
5277 or not at all if changing back to OP's starting mode. */
5278 if (GET_CODE (op) == SUBREG)
5280 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5281 int final_offset = byte + SUBREG_BYTE (op);
5282 rtx newx;
5284 if (outermode == innermostmode
5285 && byte == 0 && SUBREG_BYTE (op) == 0)
5286 return SUBREG_REG (op);
5288 /* The SUBREG_BYTE represents the offset, as if the value were stored
5289 in memory. An irritating exception is the paradoxical subreg, where
5290 we define SUBREG_BYTE to be 0; on big-endian machines this value
5291 should really be negative. For a moment, undo this exception. */
5292 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5294 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5295 if (WORDS_BIG_ENDIAN)
5296 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5297 if (BYTES_BIG_ENDIAN)
5298 final_offset += difference % UNITS_PER_WORD;
5300 if (SUBREG_BYTE (op) == 0
5301 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5303 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5304 if (WORDS_BIG_ENDIAN)
5305 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5306 if (BYTES_BIG_ENDIAN)
5307 final_offset += difference % UNITS_PER_WORD;
5310 /* See whether resulting subreg will be paradoxical. */
5311 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5313 /* In nonparadoxical subregs we can't handle negative offsets. */
5314 if (final_offset < 0)
5315 return NULL_RTX;
5316 /* Bail out in case resulting subreg would be incorrect. */
5317 if (final_offset % GET_MODE_SIZE (outermode)
5318 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5319 return NULL_RTX;
5321 else
5323 int offset = 0;
5324 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5326 /* In a paradoxical subreg, see if we are still looking at the lower
5327 part. If so, our SUBREG_BYTE will be 0. */
5328 if (WORDS_BIG_ENDIAN)
5329 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5330 if (BYTES_BIG_ENDIAN)
5331 offset += difference % UNITS_PER_WORD;
5332 if (offset == final_offset)
5333 final_offset = 0;
5334 else
5335 return NULL_RTX;
5338 /* Recurse for further possible simplifications. */
5339 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5340 final_offset);
5341 if (newx)
5342 return newx;
5343 if (validate_subreg (outermode, innermostmode,
5344 SUBREG_REG (op), final_offset))
5346 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5347 if (SUBREG_PROMOTED_VAR_P (op)
5348 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5349 && GET_MODE_CLASS (outermode) == MODE_INT
5350 && IN_RANGE (GET_MODE_SIZE (outermode),
5351 GET_MODE_SIZE (innermode),
5352 GET_MODE_SIZE (innermostmode))
5353 && subreg_lowpart_p (newx))
5355 SUBREG_PROMOTED_VAR_P (newx) = 1;
5356 SUBREG_PROMOTED_UNSIGNED_SET
5357 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5359 return newx;
5361 return NULL_RTX;
5364 /* Merge implicit and explicit truncations. */
5366 if (GET_CODE (op) == TRUNCATE
5367 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5368 && subreg_lowpart_offset (outermode, innermode) == byte)
5369 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5370 GET_MODE (XEXP (op, 0)));
5372 /* SUBREG of a hard register => just change the register number
5373 and/or mode. If the hard register is not valid in that mode,
5374 suppress this simplification. If the hard register is the stack,
5375 frame, or argument pointer, leave this as a SUBREG. */
5377 if (REG_P (op) && HARD_REGISTER_P (op))
5379 unsigned int regno, final_regno;
5381 regno = REGNO (op);
5382 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5383 if (HARD_REGISTER_NUM_P (final_regno))
5385 rtx x;
5386 int final_offset = byte;
5388 /* Adjust offset for paradoxical subregs. */
5389 if (byte == 0
5390 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5392 int difference = (GET_MODE_SIZE (innermode)
5393 - GET_MODE_SIZE (outermode));
5394 if (WORDS_BIG_ENDIAN)
5395 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5396 if (BYTES_BIG_ENDIAN)
5397 final_offset += difference % UNITS_PER_WORD;
5400 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5402 /* Propagate the original regno. We don't have any way to specify
5403 the offset inside the original regno, so do so only for the lowpart.
5404 The information is used only by alias analysis, which cannot
5405 grok partial registers anyway. */
5407 if (subreg_lowpart_offset (outermode, innermode) == byte)
5408 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5409 return x;
5413 /* If we have a SUBREG of a register that we are replacing and we are
5414 replacing it with a MEM, make a new MEM and try replacing the
5415 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5416 or if we would be widening it. */
5418 if (MEM_P (op)
5419 && ! mode_dependent_address_p (XEXP (op, 0))
5420 /* Allow splitting of volatile memory references in case we don't
5421 have an instruction to move the whole thing. */
5422 && (! MEM_VOLATILE_P (op)
5423 || ! have_insn_for (SET, innermode))
5424 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5425 return adjust_address_nv (op, outermode, byte);
5427 /* Handle complex values represented as CONCAT
5428 of real and imaginary part. */
5429 if (GET_CODE (op) == CONCAT)
5431 unsigned int part_size, final_offset;
5432 rtx part, res;
5434 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5435 if (byte < part_size)
5437 part = XEXP (op, 0);
5438 final_offset = byte;
5440 else
5442 part = XEXP (op, 1);
5443 final_offset = byte - part_size;
5446 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5447 return NULL_RTX;
5449 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5450 if (res)
5451 return res;
5452 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5453 return gen_rtx_SUBREG (outermode, part, final_offset);
5454 return NULL_RTX;
5457 /* Optimize SUBREG truncations of zero and sign extended values. */
5458 if ((GET_CODE (op) == ZERO_EXTEND
5459 || GET_CODE (op) == SIGN_EXTEND)
5460 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5462 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5464 /* If we're requesting the lowpart of a zero or sign extension,
5465 there are three possibilities. If the outermode is the same
5466 as the origmode, we can omit both the extension and the subreg.
5467 If the outermode is not larger than the origmode, we can apply
5468 the truncation without the extension. Finally, if the outermode
5469 is larger than the origmode, but both are integer modes, we
5470 can just extend to the appropriate mode. */
5471 if (bitpos == 0)
5473 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5474 if (outermode == origmode)
5475 return XEXP (op, 0);
5476 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5477 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5478 subreg_lowpart_offset (outermode,
5479 origmode));
5480 if (SCALAR_INT_MODE_P (outermode))
5481 return simplify_gen_unary (GET_CODE (op), outermode,
5482 XEXP (op, 0), origmode);
5485 /* A SUBREG resulting from a zero extension may fold to zero if
5486 it extracts higher bits than the ZERO_EXTEND's source bits. */
5487 if (GET_CODE (op) == ZERO_EXTEND
5488 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5489 return CONST0_RTX (outermode);
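/* E.g. (illustrative, little-endian): (subreg:SI (zero_extend:DI (x:QI)) 4)
   selects bits 32..63 of a value whose nonzero bits all lie below bit 8,
   so it folds to (const_int 0).  */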
5492 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5493 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5494 the outer subreg is effectively a truncation to the original mode. */
5495 if ((GET_CODE (op) == LSHIFTRT
5496 || GET_CODE (op) == ASHIFTRT)
5497 && SCALAR_INT_MODE_P (outermode)
5498 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5499 to avoid the possibility that an outer LSHIFTRT shifts by more
5500 than the sign extension's sign_bit_copies and introduces zeros
5501 into the high bits of the result. */
5502 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5503 && CONST_INT_P (XEXP (op, 1))
5504 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5505 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5506 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5507 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5508 return simplify_gen_binary (ASHIFTRT, outermode,
5509 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5511 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5512 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5513 the outer subreg is effectively a truncation to the original mode. */
5514 if ((GET_CODE (op) == LSHIFTRT
5515 || GET_CODE (op) == ASHIFTRT)
5516 && SCALAR_INT_MODE_P (outermode)
5517 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5518 && CONST_INT_P (XEXP (op, 1))
5519 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5520 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5521 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5522 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5523 return simplify_gen_binary (LSHIFTRT, outermode,
5524 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5526 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5527 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5528 the outer subreg is effectively a truncation to the original mode. */
5529 if (GET_CODE (op) == ASHIFT
5530 && SCALAR_INT_MODE_P (outermode)
5531 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5532 && CONST_INT_P (XEXP (op, 1))
5533 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5534 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5535 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5536 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5537 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5538 return simplify_gen_binary (ASHIFT, outermode,
5539 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5541 /* Recognize a word extraction from a multi-word subreg. */
5542 if ((GET_CODE (op) == LSHIFTRT
5543 || GET_CODE (op) == ASHIFTRT)
5544 && SCALAR_INT_MODE_P (outermode)
5545 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5546 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5547 && CONST_INT_P (XEXP (op, 1))
5548 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5549 && INTVAL (XEXP (op, 1)) >= 0
5550 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5551 && byte == subreg_lowpart_offset (outermode, innermode))
5553 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5554 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5555 (WORDS_BIG_ENDIAN
5556 ? byte - shifted_bytes
5557 : byte + shifted_bytes));
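/* Example (illustrative, 32-bit little-endian target):
   (subreg:SI (lshiftrt:DI x (const_int 32)) 0) is recognized as the
   upper word of x and becomes (subreg:SI x 4).  */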
5560 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5561 and try replacing the SUBREG and shift with it. Don't do this if
5562 the MEM has a mode-dependent address or if we would be widening it. */
5564 if ((GET_CODE (op) == LSHIFTRT
5565 || GET_CODE (op) == ASHIFTRT)
5566 && MEM_P (XEXP (op, 0))
5567 && CONST_INT_P (XEXP (op, 1))
5568 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5569 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5570 && INTVAL (XEXP (op, 1)) > 0
5571 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5572 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5573 && ! MEM_VOLATILE_P (XEXP (op, 0))
5574 && byte == subreg_lowpart_offset (outermode, innermode)
5575 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5576 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5578 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5579 return adjust_address_nv (XEXP (op, 0), outermode,
5580 (WORDS_BIG_ENDIAN
5581 ? byte - shifted_bytes
5582 : byte + shifted_bytes));
5585 return NULL_RTX;
5588 /* Make a SUBREG operation or equivalent if it folds. */
5591 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5592 enum machine_mode innermode, unsigned int byte)
5594 rtx newx;
5596 newx = simplify_subreg (outermode, op, innermode, byte);
5597 if (newx)
5598 return newx;
5600 if (GET_CODE (op) == SUBREG
5601 || GET_CODE (op) == CONCAT
5602 || GET_MODE (op) == VOIDmode)
5603 return NULL_RTX;
5605 if (validate_subreg (outermode, innermode, op, byte))
5606 return gen_rtx_SUBREG (outermode, op, byte);
5608 return NULL_RTX;
5611 /* Simplify X, an rtx expression.
5613 Return the simplified expression or NULL if no simplifications
5614 were possible.
5616 This is the preferred entry point into the simplification routines;
5617 however, we still allow passes to call the more specific routines.
5619 Right now GCC has three (yes, three) major bodies of RTL simplification
5620 code that need to be unified.
5622 1. fold_rtx in cse.c. This code uses various CSE specific
5623 information to aid in RTL simplification.
5625 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5626 it uses combine specific information to aid in RTL
5627 simplification.
5629 3. The routines in this file.
5632 Long term we want to only have one body of simplification code; to
5633 get to that state I recommend the following steps:
5635 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5636 which are not pass dependent state into these routines.
5638 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5639 use this routine whenever possible.
5641 3. Allow for pass dependent state to be provided to these
5642 routines and add simplifications based on the pass dependent
5643 state. Remove code from cse.c & combine.c that becomes
5644 redundant/dead.
5646 It will take time, but ultimately the compiler will be easier to
5647 maintain and improve. It's totally silly that when we add a
5648 simplification it needs to be added in 4 places (3 for RTL
5649 simplification and 1 for tree simplification). */
5652 simplify_rtx (const_rtx x)
5654 const enum rtx_code code = GET_CODE (x);
5655 const enum machine_mode mode = GET_MODE (x);
5657 switch (GET_RTX_CLASS (code))
5659 case RTX_UNARY:
5660 return simplify_unary_operation (code, mode,
5661 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5662 case RTX_COMM_ARITH:
5663 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5664 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5666 /* Fall through.... */
5668 case RTX_BIN_ARITH:
5669 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5671 case RTX_TERNARY:
5672 case RTX_BITFIELD_OPS:
5673 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5674 XEXP (x, 0), XEXP (x, 1),
5675 XEXP (x, 2));
5677 case RTX_COMPARE:
5678 case RTX_COMM_COMPARE:
5679 return simplify_relational_operation (code, mode,
5680 ((GET_MODE (XEXP (x, 0))
5681 != VOIDmode)
5682 ? GET_MODE (XEXP (x, 0))
5683 : GET_MODE (XEXP (x, 1))),
5684 XEXP (x, 0),
5685 XEXP (x, 1));
5687 case RTX_EXTRA:
5688 if (code == SUBREG)
5689 return simplify_subreg (mode, SUBREG_REG (x),
5690 GET_MODE (SUBREG_REG (x)),
5691 SUBREG_BYTE (x));
5692 break;
5694 case RTX_OBJ:
5695 if (code == LO_SUM)
5697 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5698 if (GET_CODE (XEXP (x, 0)) == HIGH
5699 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5700 return XEXP (x, 1);
5702 break;
5704 default:
5705 break;
5707 return NULL;