[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
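/* For illustration, assuming a 64-bit HOST_WIDE_INT host:
   HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -1) is (HOST_WIDE_INT) -1,
   because the low word is negative when viewed as signed, while
   HWI_SIGN_EXTEND (1) is 0.  The result is the high word that pairs
   with LOW to form a sign-extended (low, high) value.  */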
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
 70 /* Test whether expression X is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
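/* For example, in 32-bit SImode this accepts only the constant whose
   low 32 bits are 0x80000000 -- the sign bit -- and rejects values such
   as 0x40000000 or 0xc0000000, where bits other than the most
   significant one differ.  */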
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
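/* As an illustration, simplify_gen_binary (PLUS, SImode, x, y) first
   tries constant folding via simplify_binary_operation; failing that,
   a commutative code such as PLUS is canonicalized so that the
   "simpler" operand (e.g. a constant) ends up second before the raw
   rtx is built with gen_rtx_fmt_ee.  */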
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
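/* Illustrative example: for a MEM whose address is a SYMBOL_REF into
   the constant pool holding an SFmode constant, the call returns the
   CONST_DOUBLE for that constant instead of the memory reference,
   provided the access mode and offset match the pooled constant;
   otherwise a subreg of the constant is attempted, falling back to
   returning the original MEM.  */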
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
 332 the same address.  We do so if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
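/* For instance, simplify_gen_unary (NEG, SImode, const1_rtx, SImode)
   folds to (const_int -1) via simplify_unary_operation, whereas a
   non-foldable operand simply yields (neg:SI op).  */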
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
 392 CMP_MODE specifies the mode in which the comparison is done. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
 409 X; if it returns non-NULL, replace X with its return value and simplify the
410 result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
 628 truncation without the extension.  Finally, if MODE
 629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
 643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
 657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
 658 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
 674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
 675 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
 686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
 687 into (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
760 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
761 GET_MODE (XEXP (SUBREG_REG (op), 0)));
763 /* (truncate:A (truncate:B X)) is (truncate:A X). */
764 if (GET_CODE (op) == TRUNCATE)
765 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
766 GET_MODE (XEXP (op, 0)));
768 return NULL_RTX;
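/* A worked example of the distribution rule above: assuming DImode X
   and Y, a request to truncate (plus:DI X Y) to SImode becomes
   (plus:SI (truncate:SI X) (truncate:SI Y)), and each inner TRUNCATE
   may collapse further (e.g. if X is itself a sign_extend from
   SImode).  If no rule matches, NULL_RTX tells the caller that the
   truncation could not be simplified.  */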
771 /* Try to simplify a unary operation CODE whose output mode is to be
772 MODE with input operand OP whose mode was originally OP_MODE.
773 Return zero if no simplification can be made. */
775 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
776 rtx op, enum machine_mode op_mode)
778 rtx trueop, tem;
780 trueop = avoid_constant_pool_reference (op);
782 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
783 if (tem)
784 return tem;
786 return simplify_unary_operation_1 (code, mode, op);
789 /* Perform some simplifications we can do even if the operands
790 aren't constant. */
791 static rtx
792 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
794 enum rtx_code reversed;
795 rtx temp;
797 switch (code)
799 case NOT:
800 /* (not (not X)) == X. */
801 if (GET_CODE (op) == NOT)
802 return XEXP (op, 0);
804 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
805 comparison is all ones. */
806 if (COMPARISON_P (op)
807 && (mode == BImode || STORE_FLAG_VALUE == -1)
808 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
809 return simplify_gen_relational (reversed, mode, VOIDmode,
810 XEXP (op, 0), XEXP (op, 1));
812 /* (not (plus X -1)) can become (neg X). */
813 if (GET_CODE (op) == PLUS
814 && XEXP (op, 1) == constm1_rtx)
815 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
817 /* Similarly, (not (neg X)) is (plus X -1). */
818 if (GET_CODE (op) == NEG)
819 return plus_constant (mode, XEXP (op, 0), -1);
821 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
822 if (GET_CODE (op) == XOR
823 && CONST_INT_P (XEXP (op, 1))
824 && (temp = simplify_unary_operation (NOT, mode,
825 XEXP (op, 1), mode)) != 0)
826 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
828 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
829 if (GET_CODE (op) == PLUS
830 && CONST_INT_P (XEXP (op, 1))
831 && mode_signbit_p (mode, XEXP (op, 1))
832 && (temp = simplify_unary_operation (NOT, mode,
833 XEXP (op, 1), mode)) != 0)
834 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
838 operands other than 1, but that is not valid. We could do a
839 similar simplification for (not (lshiftrt C X)) where C is
840 just the sign bit, but this doesn't seem common enough to
841 bother with. */
842 if (GET_CODE (op) == ASHIFT
843 && XEXP (op, 0) == const1_rtx)
845 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
846 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
849 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
850 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
851 so we can perform the above simplification. */
853 if (STORE_FLAG_VALUE == -1
854 && GET_CODE (op) == ASHIFTRT
 855 && CONST_INT_P (XEXP (op, 1))
856 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
857 return simplify_gen_relational (GE, mode, VOIDmode,
858 XEXP (op, 0), const0_rtx);
861 if (GET_CODE (op) == SUBREG
862 && subreg_lowpart_p (op)
863 && (GET_MODE_SIZE (GET_MODE (op))
864 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
865 && GET_CODE (SUBREG_REG (op)) == ASHIFT
866 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
868 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
869 rtx x;
871 x = gen_rtx_ROTATE (inner_mode,
872 simplify_gen_unary (NOT, inner_mode, const1_rtx,
873 inner_mode),
874 XEXP (SUBREG_REG (op), 1));
875 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
876 if (temp)
877 return temp;
 880 /* Apply De Morgan's laws to reduce the number of patterns for machines
 881 with negating logical insns (and-not, nand, etc.).  If the result has
882 only one NOT, put it first, since that is how the patterns are
883 coded. */
885 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
887 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
888 enum machine_mode op_mode;
890 op_mode = GET_MODE (in1);
891 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
893 op_mode = GET_MODE (in2);
894 if (op_mode == VOIDmode)
895 op_mode = mode;
896 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
898 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
900 rtx tem = in2;
901 in2 = in1; in1 = tem;
904 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
905 mode, in1, in2);
907 break;
909 case NEG:
910 /* (neg (neg X)) == X. */
911 if (GET_CODE (op) == NEG)
912 return XEXP (op, 0);
914 /* (neg (plus X 1)) can become (not X). */
915 if (GET_CODE (op) == PLUS
916 && XEXP (op, 1) == const1_rtx)
917 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
919 /* Similarly, (neg (not X)) is (plus X 1). */
920 if (GET_CODE (op) == NOT)
921 return plus_constant (mode, XEXP (op, 0), 1);
923 /* (neg (minus X Y)) can become (minus Y X). This transformation
924 isn't safe for modes with signed zeros, since if X and Y are
925 both +0, (minus Y X) is the same as (minus X Y). If the
926 rounding mode is towards +infinity (or -infinity) then the two
927 expressions will be rounded differently. */
928 if (GET_CODE (op) == MINUS
929 && !HONOR_SIGNED_ZEROS (mode)
930 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
931 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
933 if (GET_CODE (op) == PLUS
934 && !HONOR_SIGNED_ZEROS (mode)
935 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
937 /* (neg (plus A C)) is simplified to (minus -C A). */
938 if (CONST_SCALAR_INT_P (XEXP (op, 1))
939 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
941 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
942 if (temp)
943 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
946 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
947 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
948 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
951 /* (neg (mult A B)) becomes (mult A (neg B)).
952 This works even for floating-point values. */
953 if (GET_CODE (op) == MULT
954 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
956 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
957 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
960 /* NEG commutes with ASHIFT since it is multiplication. Only do
961 this if we can then eliminate the NEG (e.g., if the operand
962 is a constant). */
963 if (GET_CODE (op) == ASHIFT)
965 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
966 if (temp)
967 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
970 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
971 C is equal to the width of MODE minus 1. */
972 if (GET_CODE (op) == ASHIFTRT
973 && CONST_INT_P (XEXP (op, 1))
974 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
975 return simplify_gen_binary (LSHIFTRT, mode,
976 XEXP (op, 0), XEXP (op, 1));
978 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
979 C is equal to the width of MODE minus 1. */
980 if (GET_CODE (op) == LSHIFTRT
981 && CONST_INT_P (XEXP (op, 1))
982 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
983 return simplify_gen_binary (ASHIFTRT, mode,
984 XEXP (op, 0), XEXP (op, 1));
986 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
987 if (GET_CODE (op) == XOR
988 && XEXP (op, 1) == const1_rtx
989 && nonzero_bits (XEXP (op, 0), mode) == 1)
990 return plus_constant (mode, XEXP (op, 0), -1);
992 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
993 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
994 if (GET_CODE (op) == LT
995 && XEXP (op, 1) == const0_rtx
996 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
998 enum machine_mode inner = GET_MODE (XEXP (op, 0));
999 int isize = GET_MODE_PRECISION (inner);
1000 if (STORE_FLAG_VALUE == 1)
1002 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1003 GEN_INT (isize - 1));
1004 if (mode == inner)
1005 return temp;
1006 if (GET_MODE_PRECISION (mode) > isize)
1007 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1008 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1010 else if (STORE_FLAG_VALUE == -1)
1012 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1013 GEN_INT (isize - 1));
1014 if (mode == inner)
1015 return temp;
1016 if (GET_MODE_PRECISION (mode) > isize)
1017 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1018 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1021 break;
1023 case TRUNCATE:
1024 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1025 with the umulXi3_highpart patterns. */
1026 if (GET_CODE (op) == LSHIFTRT
1027 && GET_CODE (XEXP (op, 0)) == MULT)
1028 break;
1030 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1032 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1034 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1035 if (temp)
1036 return temp;
1038 /* We can't handle truncation to a partial integer mode here
1039 because we don't know the real bitsize of the partial
1040 integer mode. */
1041 break;
1044 if (GET_MODE (op) != VOIDmode)
1046 temp = simplify_truncation (mode, op, GET_MODE (op));
1047 if (temp)
1048 return temp;
1051 /* If we know that the value is already truncated, we can
1052 replace the TRUNCATE with a SUBREG. */
1053 if (GET_MODE_NUNITS (mode) == 1
1054 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1055 || truncated_to_mode (mode, op)))
1057 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1058 if (temp)
1059 return temp;
1062 /* A truncate of a comparison can be replaced with a subreg if
1063 STORE_FLAG_VALUE permits. This is like the previous test,
1064 but it works even if the comparison is done in a mode larger
1065 than HOST_BITS_PER_WIDE_INT. */
1066 if (HWI_COMPUTABLE_MODE_P (mode)
1067 && COMPARISON_P (op)
1068 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1070 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1071 if (temp)
1072 return temp;
1075 /* A truncate of a memory is just loading the low part of the memory
1076 if we are not changing the meaning of the address. */
1077 if (GET_CODE (op) == MEM
1078 && !VECTOR_MODE_P (mode)
1079 && !MEM_VOLATILE_P (op)
1080 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1082 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1083 if (temp)
1084 return temp;
1087 break;
1089 case FLOAT_TRUNCATE:
1090 if (DECIMAL_FLOAT_MODE_P (mode))
1091 break;
1093 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1094 if (GET_CODE (op) == FLOAT_EXTEND
1095 && GET_MODE (XEXP (op, 0)) == mode)
1096 return XEXP (op, 0);
1098 /* (float_truncate:SF (float_truncate:DF foo:XF))
1099 = (float_truncate:SF foo:XF).
1100 This may eliminate double rounding, so it is unsafe.
1102 (float_truncate:SF (float_extend:XF foo:DF))
1103 = (float_truncate:SF foo:DF).
1105 (float_truncate:DF (float_extend:XF foo:SF))
 1106 = (float_extend:DF foo:SF). */
1107 if ((GET_CODE (op) == FLOAT_TRUNCATE
1108 && flag_unsafe_math_optimizations)
1109 || GET_CODE (op) == FLOAT_EXTEND)
1110 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1111 0)))
1112 > GET_MODE_SIZE (mode)
1113 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1114 mode,
1115 XEXP (op, 0), mode);
1117 /* (float_truncate (float x)) is (float x) */
1118 if (GET_CODE (op) == FLOAT
1119 && (flag_unsafe_math_optimizations
1120 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1121 && ((unsigned)significand_size (GET_MODE (op))
1122 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1123 - num_sign_bit_copies (XEXP (op, 0),
1124 GET_MODE (XEXP (op, 0))))))))
1125 return simplify_gen_unary (FLOAT, mode,
1126 XEXP (op, 0),
1127 GET_MODE (XEXP (op, 0)));
 1129 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1130 (OP:SF foo:SF) if OP is NEG or ABS. */
1131 if ((GET_CODE (op) == ABS
1132 || GET_CODE (op) == NEG)
1133 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1134 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1135 return simplify_gen_unary (GET_CODE (op), mode,
1136 XEXP (XEXP (op, 0), 0), mode);
1138 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1139 is (float_truncate:SF x). */
1140 if (GET_CODE (op) == SUBREG
1141 && subreg_lowpart_p (op)
1142 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1143 return SUBREG_REG (op);
1144 break;
1146 case FLOAT_EXTEND:
1147 if (DECIMAL_FLOAT_MODE_P (mode))
1148 break;
1150 /* (float_extend (float_extend x)) is (float_extend x)
1152 (float_extend (float x)) is (float x) assuming that double
 1153 rounding can't happen. */
1155 if (GET_CODE (op) == FLOAT_EXTEND
1156 || (GET_CODE (op) == FLOAT
1157 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1158 && ((unsigned)significand_size (GET_MODE (op))
1159 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1160 - num_sign_bit_copies (XEXP (op, 0),
1161 GET_MODE (XEXP (op, 0)))))))
1162 return simplify_gen_unary (GET_CODE (op), mode,
1163 XEXP (op, 0),
1164 GET_MODE (XEXP (op, 0)));
1166 break;
1168 case ABS:
1169 /* (abs (neg <foo>)) -> (abs <foo>) */
1170 if (GET_CODE (op) == NEG)
1171 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1172 GET_MODE (XEXP (op, 0)));
1174 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1175 do nothing. */
1176 if (GET_MODE (op) == VOIDmode)
1177 break;
1179 /* If operand is something known to be positive, ignore the ABS. */
1180 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1181 || val_signbit_known_clear_p (GET_MODE (op),
1182 nonzero_bits (op, GET_MODE (op))))
1183 return op;
1185 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1186 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1187 return gen_rtx_NEG (mode, op);
1189 break;
1191 case FFS:
1192 /* (ffs (*_extend <X>)) = (ffs <X>) */
1193 if (GET_CODE (op) == SIGN_EXTEND
1194 || GET_CODE (op) == ZERO_EXTEND)
1195 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1196 GET_MODE (XEXP (op, 0)));
1197 break;
1199 case POPCOUNT:
1200 switch (GET_CODE (op))
1202 case BSWAP:
1203 case ZERO_EXTEND:
1204 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1205 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1206 GET_MODE (XEXP (op, 0)));
1208 case ROTATE:
1209 case ROTATERT:
1210 /* Rotations don't affect popcount. */
1211 if (!side_effects_p (XEXP (op, 1)))
1212 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1213 GET_MODE (XEXP (op, 0)));
1214 break;
1216 default:
1217 break;
1219 break;
1221 case PARITY:
1222 switch (GET_CODE (op))
1224 case NOT:
1225 case BSWAP:
1226 case ZERO_EXTEND:
1227 case SIGN_EXTEND:
1228 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1229 GET_MODE (XEXP (op, 0)));
1231 case ROTATE:
1232 case ROTATERT:
1233 /* Rotations don't affect parity. */
1234 if (!side_effects_p (XEXP (op, 1)))
1235 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1236 GET_MODE (XEXP (op, 0)));
1237 break;
1239 default:
1240 break;
1242 break;
1244 case BSWAP:
1245 /* (bswap (bswap x)) -> x. */
1246 if (GET_CODE (op) == BSWAP)
1247 return XEXP (op, 0);
1248 break;
1250 case FLOAT:
1251 /* (float (sign_extend <X>)) = (float <X>). */
1252 if (GET_CODE (op) == SIGN_EXTEND)
1253 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1254 GET_MODE (XEXP (op, 0)));
1255 break;
1257 case SIGN_EXTEND:
1258 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1259 becomes just the MINUS if its mode is MODE. This allows
1260 folding switch statements on machines using casesi (such as
1261 the VAX). */
1262 if (GET_CODE (op) == TRUNCATE
1263 && GET_MODE (XEXP (op, 0)) == mode
1264 && GET_CODE (XEXP (op, 0)) == MINUS
1265 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1266 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1267 return XEXP (op, 0);
1269 /* Extending a widening multiplication should be canonicalized to
1270 a wider widening multiplication. */
1271 if (GET_CODE (op) == MULT)
1273 rtx lhs = XEXP (op, 0);
1274 rtx rhs = XEXP (op, 1);
1275 enum rtx_code lcode = GET_CODE (lhs);
1276 enum rtx_code rcode = GET_CODE (rhs);
1278 /* Widening multiplies usually extend both operands, but sometimes
1279 they use a shift to extract a portion of a register. */
1280 if ((lcode == SIGN_EXTEND
1281 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1282 && (rcode == SIGN_EXTEND
1283 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1285 enum machine_mode lmode = GET_MODE (lhs);
1286 enum machine_mode rmode = GET_MODE (rhs);
1287 int bits;
1289 if (lcode == ASHIFTRT)
1290 /* Number of bits not shifted off the end. */
1291 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1292 else /* lcode == SIGN_EXTEND */
1293 /* Size of inner mode. */
1294 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1296 if (rcode == ASHIFTRT)
1297 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1298 else /* rcode == SIGN_EXTEND */
1299 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1301 /* We can only widen multiplies if the result is mathematically
 1302 equivalent, i.e. if overflow was impossible. */
1303 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1304 return simplify_gen_binary
1305 (MULT, mode,
1306 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1307 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1311 /* Check for a sign extension of a subreg of a promoted
1312 variable, where the promotion is sign-extended, and the
1313 target mode is the same as the variable's promotion. */
1314 if (GET_CODE (op) == SUBREG
1315 && SUBREG_PROMOTED_VAR_P (op)
1316 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1317 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1319 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1320 if (temp)
1321 return temp;
1324 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1325 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1326 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1328 gcc_assert (GET_MODE_BITSIZE (mode)
1329 > GET_MODE_BITSIZE (GET_MODE (op)));
1330 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1331 GET_MODE (XEXP (op, 0)));
1334 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1335 is (sign_extend:M (subreg:O <X>)) if there is mode with
1336 GET_MODE_BITSIZE (N) - I bits.
1337 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1338 is similarly (zero_extend:M (subreg:O <X>)). */
1339 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1340 && GET_CODE (XEXP (op, 0)) == ASHIFT
1341 && CONST_INT_P (XEXP (op, 1))
1342 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1343 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1345 enum machine_mode tmode
1346 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1347 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1348 gcc_assert (GET_MODE_BITSIZE (mode)
1349 > GET_MODE_BITSIZE (GET_MODE (op)));
1350 if (tmode != BLKmode)
1352 rtx inner =
1353 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1354 if (inner)
1355 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1356 ? SIGN_EXTEND : ZERO_EXTEND,
1357 mode, inner, tmode);
1361 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1362 /* As we do not know which address space the pointer is referring to,
1363 we can do this only if the target does not support different pointer
1364 or address modes depending on the address space. */
1365 if (target_default_pointer_address_modes_p ()
1366 && ! POINTERS_EXTEND_UNSIGNED
1367 && mode == Pmode && GET_MODE (op) == ptr_mode
1368 && (CONSTANT_P (op)
1369 || (GET_CODE (op) == SUBREG
1370 && REG_P (SUBREG_REG (op))
1371 && REG_POINTER (SUBREG_REG (op))
1372 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1373 return convert_memory_address (Pmode, op);
1374 #endif
1375 break;
1377 case ZERO_EXTEND:
1378 /* Check for a zero extension of a subreg of a promoted
1379 variable, where the promotion is zero-extended, and the
1380 target mode is the same as the variable's promotion. */
1381 if (GET_CODE (op) == SUBREG
1382 && SUBREG_PROMOTED_VAR_P (op)
1383 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1384 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1386 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1387 if (temp)
1388 return temp;
1391 /* Extending a widening multiplication should be canonicalized to
1392 a wider widening multiplication. */
1393 if (GET_CODE (op) == MULT)
1395 rtx lhs = XEXP (op, 0);
1396 rtx rhs = XEXP (op, 1);
1397 enum rtx_code lcode = GET_CODE (lhs);
1398 enum rtx_code rcode = GET_CODE (rhs);
1400 /* Widening multiplies usually extend both operands, but sometimes
1401 they use a shift to extract a portion of a register. */
1402 if ((lcode == ZERO_EXTEND
1403 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1404 && (rcode == ZERO_EXTEND
1405 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1407 enum machine_mode lmode = GET_MODE (lhs);
1408 enum machine_mode rmode = GET_MODE (rhs);
1409 int bits;
1411 if (lcode == LSHIFTRT)
1412 /* Number of bits not shifted off the end. */
1413 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1414 else /* lcode == ZERO_EXTEND */
1415 /* Size of inner mode. */
1416 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1418 if (rcode == LSHIFTRT)
1419 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1420 else /* rcode == ZERO_EXTEND */
1421 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1423 /* We can only widen multiplies if the result is mathematically
 1424 equivalent, i.e. if overflow was impossible. */
1425 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1426 return simplify_gen_binary
1427 (MULT, mode,
1428 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1429 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1433 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1434 if (GET_CODE (op) == ZERO_EXTEND)
1435 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1436 GET_MODE (XEXP (op, 0)));
1438 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (zero_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits. */
1441 if (GET_CODE (op) == LSHIFTRT
1442 && GET_CODE (XEXP (op, 0)) == ASHIFT
1443 && CONST_INT_P (XEXP (op, 1))
1444 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1445 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1447 enum machine_mode tmode
1448 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1449 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1450 if (tmode != BLKmode)
1452 rtx inner =
1453 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1454 if (inner)
1455 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1459 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1460 /* As we do not know which address space the pointer is referring to,
1461 we can do this only if the target does not support different pointer
1462 or address modes depending on the address space. */
1463 if (target_default_pointer_address_modes_p ()
1464 && POINTERS_EXTEND_UNSIGNED > 0
1465 && mode == Pmode && GET_MODE (op) == ptr_mode
1466 && (CONSTANT_P (op)
1467 || (GET_CODE (op) == SUBREG
1468 && REG_P (SUBREG_REG (op))
1469 && REG_POINTER (SUBREG_REG (op))
1470 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1471 return convert_memory_address (Pmode, op);
1472 #endif
1473 break;
1475 default:
1476 break;
1479 return 0;
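/* For example, given op = (not:SI (not:SI x)) this routine returns x
   directly, and given (neg:SI (lt:SI x (const_int 0))) with
   STORE_FLAG_VALUE == 1 it rewrites the comparison as an arithmetic
   shift of x by 31 bits (for 32-bit SImode).  A return of 0 means no
   non-constant simplification applied.  */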
1482 /* Try to compute the value of a unary operation CODE whose output mode is to
1483 be MODE with input operand OP whose mode was originally OP_MODE.
1484 Return zero if the value cannot be computed. */
1486 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1487 rtx op, enum machine_mode op_mode)
1489 unsigned int width = GET_MODE_PRECISION (mode);
1490 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1492 if (code == VEC_DUPLICATE)
1494 gcc_assert (VECTOR_MODE_P (mode));
1495 if (GET_MODE (op) != VOIDmode)
1497 if (!VECTOR_MODE_P (GET_MODE (op)))
1498 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1499 else
1500 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1501 (GET_MODE (op)));
1503 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1504 || GET_CODE (op) == CONST_VECTOR)
1506 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1507 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1508 rtvec v = rtvec_alloc (n_elts);
1509 unsigned int i;
1511 if (GET_CODE (op) != CONST_VECTOR)
1512 for (i = 0; i < n_elts; i++)
1513 RTVEC_ELT (v, i) = op;
1514 else
1516 enum machine_mode inmode = GET_MODE (op);
1517 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1518 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1520 gcc_assert (in_n_elts < n_elts);
1521 gcc_assert ((n_elts % in_n_elts) == 0);
1522 for (i = 0; i < n_elts; i++)
1523 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1525 return gen_rtx_CONST_VECTOR (mode, v);
1529 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1531 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1532 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1533 enum machine_mode opmode = GET_MODE (op);
1534 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1535 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1536 rtvec v = rtvec_alloc (n_elts);
1537 unsigned int i;
1539 gcc_assert (op_n_elts == n_elts);
1540 for (i = 0; i < n_elts; i++)
1542 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1543 CONST_VECTOR_ELT (op, i),
1544 GET_MODE_INNER (opmode));
1545 if (!x)
1546 return 0;
1547 RTVEC_ELT (v, i) = x;
1549 return gen_rtx_CONST_VECTOR (mode, v);
1552 /* The order of these tests is critical so that, for example, we don't
1553 check the wrong mode (input vs. output) for a conversion operation,
1554 such as FIX. At some point, this should be simplified. */
1556 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1558 HOST_WIDE_INT hv, lv;
1559 REAL_VALUE_TYPE d;
1561 if (CONST_INT_P (op))
1562 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1563 else
1564 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1566 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1567 d = real_value_truncate (mode, d);
1568 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1570 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1572 HOST_WIDE_INT hv, lv;
1573 REAL_VALUE_TYPE d;
1575 if (CONST_INT_P (op))
1576 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1577 else
1578 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1580 if (op_mode == VOIDmode
1581 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1582 /* We should never get a negative number. */
1583 gcc_assert (hv >= 0);
1584 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1585 hv = 0, lv &= GET_MODE_MASK (op_mode);
1587 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1588 d = real_value_truncate (mode, d);
1589 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1592 if (CONST_INT_P (op)
1593 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1595 HOST_WIDE_INT arg0 = INTVAL (op);
1596 HOST_WIDE_INT val;
1598 switch (code)
1600 case NOT:
1601 val = ~ arg0;
1602 break;
1604 case NEG:
1605 val = - arg0;
1606 break;
1608 case ABS:
1609 val = (arg0 >= 0 ? arg0 : - arg0);
1610 break;
1612 case FFS:
1613 arg0 &= GET_MODE_MASK (mode);
1614 val = ffs_hwi (arg0);
1615 break;
1617 case CLZ:
1618 arg0 &= GET_MODE_MASK (mode);
1619 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1621 else
1622 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1623 break;
1625 case CLRSB:
1626 arg0 &= GET_MODE_MASK (mode);
1627 if (arg0 == 0)
1628 val = GET_MODE_PRECISION (mode) - 1;
1629 else if (arg0 >= 0)
1630 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1631 else if (arg0 < 0)
1632 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1633 break;
1635 case CTZ:
1636 arg0 &= GET_MODE_MASK (mode);
1637 if (arg0 == 0)
1639 /* Even if the value at zero is undefined, we have to come
1640 up with some replacement. Seems good enough. */
1641 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1642 val = GET_MODE_PRECISION (mode);
1644 else
1645 val = ctz_hwi (arg0);
1646 break;
1648 case POPCOUNT:
1649 arg0 &= GET_MODE_MASK (mode);
1650 val = 0;
1651 while (arg0)
1652 val++, arg0 &= arg0 - 1;
1653 break;
1655 case PARITY:
1656 arg0 &= GET_MODE_MASK (mode);
1657 val = 0;
1658 while (arg0)
1659 val++, arg0 &= arg0 - 1;
1660 val &= 1;
1661 break;
1663 case BSWAP:
1665 unsigned int s;
1667 val = 0;
1668 for (s = 0; s < width; s += 8)
1670 unsigned int d = width - s - 8;
1671 unsigned HOST_WIDE_INT byte;
1672 byte = (arg0 >> s) & 0xff;
1673 val |= byte << d;
1676 break;
1678 case TRUNCATE:
1679 val = arg0;
1680 break;
1682 case ZERO_EXTEND:
1683 /* When zero-extending a CONST_INT, we need to know its
1684 original mode. */
1685 gcc_assert (op_mode != VOIDmode);
1686 if (op_width == HOST_BITS_PER_WIDE_INT)
1688 /* If we were really extending the mode,
1689 we would have to distinguish between zero-extension
1690 and sign-extension. */
1691 gcc_assert (width == op_width);
1692 val = arg0;
1694 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1695 val = arg0 & GET_MODE_MASK (op_mode);
1696 else
1697 return 0;
1698 break;
1700 case SIGN_EXTEND:
1701 if (op_mode == VOIDmode)
1702 op_mode = mode;
1703 op_width = GET_MODE_PRECISION (op_mode);
1704 if (op_width == HOST_BITS_PER_WIDE_INT)
1706 /* If we were really extending the mode,
1707 we would have to distinguish between zero-extension
1708 and sign-extension. */
1709 gcc_assert (width == op_width);
1710 val = arg0;
1712 else if (op_width < HOST_BITS_PER_WIDE_INT)
1714 val = arg0 & GET_MODE_MASK (op_mode);
1715 if (val_signbit_known_set_p (op_mode, val))
1716 val |= ~GET_MODE_MASK (op_mode);
1718 else
1719 return 0;
1720 break;
1722 case SQRT:
1723 case FLOAT_EXTEND:
1724 case FLOAT_TRUNCATE:
1725 case SS_TRUNCATE:
1726 case US_TRUNCATE:
1727 case SS_NEG:
1728 case US_NEG:
1729 case SS_ABS:
1730 return 0;
1732 default:
1733 gcc_unreachable ();
1736 return gen_int_mode (val, mode);
1739 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1740 for a DImode operation on a CONST_INT. */
1741 else if (width <= HOST_BITS_PER_DOUBLE_INT
1742 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1744 double_int first, value;
1746 if (CONST_DOUBLE_AS_INT_P (op))
1747 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1748 CONST_DOUBLE_LOW (op));
1749 else
1750 first = double_int::from_shwi (INTVAL (op));
1752 switch (code)
1754 case NOT:
1755 value = ~first;
1756 break;
1758 case NEG:
1759 value = -first;
1760 break;
1762 case ABS:
1763 if (first.is_negative ())
1764 value = -first;
1765 else
1766 value = first;
1767 break;
1769 case FFS:
1770 value.high = 0;
1771 if (first.low != 0)
1772 value.low = ffs_hwi (first.low);
1773 else if (first.high != 0)
1774 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1775 else
1776 value.low = 0;
1777 break;
1779 case CLZ:
1780 value.high = 0;
1781 if (first.high != 0)
1782 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1783 - HOST_BITS_PER_WIDE_INT;
1784 else if (first.low != 0)
1785 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1786 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1787 value.low = GET_MODE_PRECISION (mode);
1788 break;
1790 case CTZ:
1791 value.high = 0;
1792 if (first.low != 0)
1793 value.low = ctz_hwi (first.low);
1794 else if (first.high != 0)
1795 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1796 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1797 value.low = GET_MODE_PRECISION (mode);
1798 break;
1800 case POPCOUNT:
1801 value = double_int_zero;
1802 while (first.low)
1804 value.low++;
1805 first.low &= first.low - 1;
1807 while (first.high)
1809 value.low++;
1810 first.high &= first.high - 1;
1812 break;
1814 case PARITY:
1815 value = double_int_zero;
1816 while (first.low)
1818 value.low++;
1819 first.low &= first.low - 1;
1821 while (first.high)
1823 value.low++;
1824 first.high &= first.high - 1;
1826 value.low &= 1;
1827 break;
1829 case BSWAP:
1831 unsigned int s;
1833 value = double_int_zero;
1834 for (s = 0; s < width; s += 8)
1836 unsigned int d = width - s - 8;
1837 unsigned HOST_WIDE_INT byte;
1839 if (s < HOST_BITS_PER_WIDE_INT)
1840 byte = (first.low >> s) & 0xff;
1841 else
1842 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1844 if (d < HOST_BITS_PER_WIDE_INT)
1845 value.low |= byte << d;
1846 else
1847 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1850 break;
1852 case TRUNCATE:
1853 /* This is just a change-of-mode, so do nothing. */
1854 value = first;
1855 break;
1857 case ZERO_EXTEND:
1858 gcc_assert (op_mode != VOIDmode);
1860 if (op_width > HOST_BITS_PER_WIDE_INT)
1861 return 0;
1863 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1864 break;
1866 case SIGN_EXTEND:
1867 if (op_mode == VOIDmode
1868 || op_width > HOST_BITS_PER_WIDE_INT)
1869 return 0;
1870 else
1872 value.low = first.low & GET_MODE_MASK (op_mode);
1873 if (val_signbit_known_set_p (op_mode, value.low))
1874 value.low |= ~GET_MODE_MASK (op_mode);
1876 value.high = HWI_SIGN_EXTEND (value.low);
1878 break;
1880 case SQRT:
1881 return 0;
1883 default:
1884 return 0;
1887 return immed_double_int_const (value, mode);
1890 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1891 && SCALAR_FLOAT_MODE_P (mode)
1892 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1894 REAL_VALUE_TYPE d, t;
1895 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1897 switch (code)
1899 case SQRT:
1900 if (HONOR_SNANS (mode) && real_isnan (&d))
1901 return 0;
1902 real_sqrt (&t, mode, &d);
1903 d = t;
1904 break;
1905 case ABS:
1906 d = real_value_abs (&d);
1907 break;
1908 case NEG:
1909 d = real_value_negate (&d);
1910 break;
1911 case FLOAT_TRUNCATE:
1912 d = real_value_truncate (mode, d);
1913 break;
1914 case FLOAT_EXTEND:
1915 /* All this does is change the mode, unless the mode
1916 class changes. */
1917 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1918 real_convert (&d, mode, &d);
1919 break;
1920 case FIX:
1921 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1922 break;
1923 case NOT:
1925 long tmp[4];
1926 int i;
1928 real_to_target (tmp, &d, GET_MODE (op));
1929 for (i = 0; i < 4; i++)
1930 tmp[i] = ~tmp[i];
1931 real_from_target (&d, tmp, mode);
1932 break;
1934 default:
1935 gcc_unreachable ();
1937 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1940 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1941 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1942 && GET_MODE_CLASS (mode) == MODE_INT
1943 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1945 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1946 operators are intentionally left unspecified (to ease implementation
1947 by target backends), for consistency, this routine implements the
1948 same semantics for constant folding as used by the middle-end. */
1950 /* This was formerly used only for non-IEEE float.
1951 eggert@twinsun.com says it is safe for IEEE also. */
1952 HOST_WIDE_INT xh, xl, th, tl;
1953 REAL_VALUE_TYPE x, t;
1954 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1955 switch (code)
1957 case FIX:
1958 if (REAL_VALUE_ISNAN (x))
1959 return const0_rtx;
1961 /* Test against the signed upper bound. */
1962 if (width > HOST_BITS_PER_WIDE_INT)
1964 th = ((unsigned HOST_WIDE_INT) 1
1965 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1966 tl = -1;
1968 else
1970 th = 0;
1971 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1973 real_from_integer (&t, VOIDmode, tl, th, 0);
1974 if (REAL_VALUES_LESS (t, x))
1976 xh = th;
1977 xl = tl;
1978 break;
1981 /* Test against the signed lower bound. */
1982 if (width > HOST_BITS_PER_WIDE_INT)
1984 th = (unsigned HOST_WIDE_INT) (-1)
1985 << (width - HOST_BITS_PER_WIDE_INT - 1);
1986 tl = 0;
1988 else
1990 th = -1;
1991 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1993 real_from_integer (&t, VOIDmode, tl, th, 0);
1994 if (REAL_VALUES_LESS (x, t))
1996 xh = th;
1997 xl = tl;
1998 break;
2000 REAL_VALUE_TO_INT (&xl, &xh, x);
2001 break;
2003 case UNSIGNED_FIX:
2004 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2005 return const0_rtx;
2007 /* Test against the unsigned upper bound. */
2008 if (width == HOST_BITS_PER_DOUBLE_INT)
2010 th = -1;
2011 tl = -1;
2013 else if (width >= HOST_BITS_PER_WIDE_INT)
2015 th = ((unsigned HOST_WIDE_INT) 1
2016 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2017 tl = -1;
2019 else
2021 th = 0;
2022 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2024 real_from_integer (&t, VOIDmode, tl, th, 1);
2025 if (REAL_VALUES_LESS (t, x))
2027 xh = th;
2028 xl = tl;
2029 break;
2032 REAL_VALUE_TO_INT (&xl, &xh, x);
2033 break;
2035 default:
2036 gcc_unreachable ();
2038 return immed_double_const (xl, xh, mode);
2041 return NULL_RTX;
2044 /* Subroutine of simplify_binary_operation to simplify a commutative,
2045 associative binary operation CODE with result mode MODE, operating
2046 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2047 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2048 canonicalization is possible. */
2050 static rtx
2051 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2052 rtx op0, rtx op1)
2054 rtx tem;
2056 /* Linearize the operator to the left. */
2057 if (GET_CODE (op1) == code)
2059 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2060 if (GET_CODE (op0) == code)
2062 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2063 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2066 /* "a op (b op c)" becomes "(b op c) op a". */
2067 if (! swap_commutative_operands_p (op1, op0))
2068 return simplify_gen_binary (code, mode, op1, op0);
2070 tem = op0;
2071 op0 = op1;
2072 op1 = tem;
2075 if (GET_CODE (op0) == code)
2077 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
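/* For example, (mult (mult X (const_int 3)) Y) is rewritten here as
   (mult (mult X Y) (const_int 3)), keeping the constant outermost.  */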
2078 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2080 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2081 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2084 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2085 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2086 if (tem != 0)
2087 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2089 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2090 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2091 if (tem != 0)
2092 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2095 return 0;
2099 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2100 and OP1. Return 0 if no simplification is possible.
2102 Don't use this for relational operations such as EQ or LT.
2103 Use simplify_relational_operation instead. */
2105 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2106 rtx op0, rtx op1)
2108 rtx trueop0, trueop1;
2109 rtx tem;
2111 /* Relational operations don't work here. We must know the mode
2112 of the operands in order to do the comparison correctly.
2113 Assuming a full word can give incorrect results.
2114 Consider comparing 128 with -128 in QImode. */
2115 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2116 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2118 /* Make sure the constant is second. */
2119 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2120 && swap_commutative_operands_p (op0, op1))
2122 tem = op0, op0 = op1, op1 = tem;
2125 trueop0 = avoid_constant_pool_reference (op0);
2126 trueop1 = avoid_constant_pool_reference (op1);
2128 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2129 if (tem)
2130 return tem;
2131 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2134 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2135 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2136 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2137 actual constants. */
2139 static rtx
2140 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2141 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2143 rtx tem, reversed, opleft, opright;
2144 HOST_WIDE_INT val;
2145 unsigned int width = GET_MODE_PRECISION (mode);
2147 /* Even if we can't compute a constant result,
2148 there are some cases worth simplifying. */
2150 switch (code)
2152 case PLUS:
2153 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2154 when x is NaN, infinite, or finite and nonzero. They aren't
2155 when x is -0 and the rounding mode is not towards -infinity,
2156 since (-0) + 0 is then 0. */
2157 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2158 return op0;
2160 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2161 transformations are safe even for IEEE. */
2162 if (GET_CODE (op0) == NEG)
2163 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2164 else if (GET_CODE (op1) == NEG)
2165 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2167 /* (~a) + 1 -> -a */
2168 if (INTEGRAL_MODE_P (mode)
2169 && GET_CODE (op0) == NOT
2170 && trueop1 == const1_rtx)
2171 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2173 /* Handle both-operands-constant cases. We can only add
2174 CONST_INTs to constants since the sum of relocatable symbols
2175 can't be handled by most assemblers. Don't add CONST_INT
2176 to CONST_INT since overflow won't be computed properly if wider
2177 than HOST_BITS_PER_WIDE_INT. */
2179 if ((GET_CODE (op0) == CONST
2180 || GET_CODE (op0) == SYMBOL_REF
2181 || GET_CODE (op0) == LABEL_REF)
2182 && CONST_INT_P (op1))
2183 return plus_constant (mode, op0, INTVAL (op1));
2184 else if ((GET_CODE (op1) == CONST
2185 || GET_CODE (op1) == SYMBOL_REF
2186 || GET_CODE (op1) == LABEL_REF)
2187 && CONST_INT_P (op0))
2188 return plus_constant (mode, op1, INTVAL (op0));
2190 /* See if this is something like X * C - X or vice versa or
2191 if the multiplication is written as a shift. If so, we can
2192 distribute and make a new multiply, shift, or maybe just
2193 have X (if C is 2 in the example above). But don't make
2194 something more expensive than we had before. */
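/* For example, (plus (mult X 3) X) folds to (mult X 4) and
   (plus (ashift X 2) X) folds to (mult X 5), provided the rtx cost
   of the new form does not exceed that of the original.  */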
2196 if (SCALAR_INT_MODE_P (mode))
2198 double_int coeff0, coeff1;
2199 rtx lhs = op0, rhs = op1;
2201 coeff0 = double_int_one;
2202 coeff1 = double_int_one;
2204 if (GET_CODE (lhs) == NEG)
2206 coeff0 = double_int_minus_one;
2207 lhs = XEXP (lhs, 0);
2209 else if (GET_CODE (lhs) == MULT
2210 && CONST_INT_P (XEXP (lhs, 1)))
2212 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2213 lhs = XEXP (lhs, 0);
2215 else if (GET_CODE (lhs) == ASHIFT
2216 && CONST_INT_P (XEXP (lhs, 1))
2217 && INTVAL (XEXP (lhs, 1)) >= 0
2218 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2220 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2221 lhs = XEXP (lhs, 0);
2224 if (GET_CODE (rhs) == NEG)
2226 coeff1 = double_int_minus_one;
2227 rhs = XEXP (rhs, 0);
2229 else if (GET_CODE (rhs) == MULT
2230 && CONST_INT_P (XEXP (rhs, 1)))
2232 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2233 rhs = XEXP (rhs, 0);
2235 else if (GET_CODE (rhs) == ASHIFT
2236 && CONST_INT_P (XEXP (rhs, 1))
2237 && INTVAL (XEXP (rhs, 1)) >= 0
2238 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2240 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2241 rhs = XEXP (rhs, 0);
2244 if (rtx_equal_p (lhs, rhs))
2246 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2247 rtx coeff;
2248 double_int val;
2249 bool speed = optimize_function_for_speed_p (cfun);
2251 val = coeff0 + coeff1;
2252 coeff = immed_double_int_const (val, mode);
2254 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2255 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2256 ? tem : 0;
2260 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2261 if (CONST_SCALAR_INT_P (op1)
2262 && GET_CODE (op0) == XOR
2263 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2264 && mode_signbit_p (mode, op1))
2265 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2266 simplify_gen_binary (XOR, mode, op1,
2267 XEXP (op0, 1)));
2269 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2270 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2271 && GET_CODE (op0) == MULT
2272 && GET_CODE (XEXP (op0, 0)) == NEG)
2274 rtx in1, in2;
2276 in1 = XEXP (XEXP (op0, 0), 0);
2277 in2 = XEXP (op0, 1);
2278 return simplify_gen_binary (MINUS, mode, op1,
2279 simplify_gen_binary (MULT, mode,
2280 in1, in2));
2283 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2284 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2285 is 1. */
2286 if (COMPARISON_P (op0)
2287 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2288 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2289 && (reversed = reversed_comparison (op0, mode)))
2290 return
2291 simplify_gen_unary (NEG, mode, reversed, mode);
2293 /* If one of the operands is a PLUS or a MINUS, see if we can
2294 simplify this by the associative law.
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2299 if (INTEGRAL_MODE_P (mode)
2300 && (plus_minus_operand_p (op0)
2301 || plus_minus_operand_p (op1))
2302 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2303 return tem;
2305 /* Reassociate floating point addition only when the user
2306 specifies associative math operations. */
2307 if (FLOAT_MODE_P (mode)
2308 && flag_associative_math)
2310 tem = simplify_associative_operation (code, mode, op0, op1);
2311 if (tem)
2312 return tem;
2314 break;
2316 case COMPARE:
2317 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2318 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2319 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2320 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2322 rtx xop00 = XEXP (op0, 0);
2323 rtx xop10 = XEXP (op1, 0);
2325 #ifdef HAVE_cc0
2326 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2327 #else
2328 if (REG_P (xop00) && REG_P (xop10)
2329 && GET_MODE (xop00) == GET_MODE (xop10)
2330 && REGNO (xop00) == REGNO (xop10)
2331 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2332 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2333 #endif
2334 return xop00;
2336 break;
2338 case MINUS:
2339 /* We can't assume x-x is 0 even with non-IEEE floating point,
2340 but since it is zero except in very strange circumstances, we
2341 will treat it as zero with -ffinite-math-only. */
2342 if (rtx_equal_p (trueop0, trueop1)
2343 && ! side_effects_p (op0)
2344 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2345 return CONST0_RTX (mode);
2347 /* Change subtraction from zero into negation. (0 - x) is the
2348 same as -x when x is NaN, infinite, or finite and nonzero.
2349 But if the mode has signed zeros, and does not round towards
2350 -infinity, then 0 - 0 is 0, not -0. */
2351 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2352 return simplify_gen_unary (NEG, mode, op1, mode);
2354 /* (-1 - a) is ~a. */
2355 if (trueop0 == constm1_rtx)
2356 return simplify_gen_unary (NOT, mode, op1, mode);
2358 /* Subtracting 0 has no effect unless the mode has signed zeros
2359 and supports rounding towards -infinity. In such a case,
2360 0 - 0 is -0. */
2361 if (!(HONOR_SIGNED_ZEROS (mode)
2362 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2363 && trueop1 == CONST0_RTX (mode))
2364 return op0;
2366 /* See if this is something like X * C - X or vice versa or
2367 if the multiplication is written as a shift. If so, we can
2368 distribute and make a new multiply, shift, or maybe just
2369 have X (if C is 2 in the example above). But don't make
2370 something more expensive than we had before. */
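/* For example, (minus (mult X 3) X) folds to (mult X 2), subject to
   the same cost check as in the PLUS case above.  */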
2372 if (SCALAR_INT_MODE_P (mode))
2374 double_int coeff0, negcoeff1;
2375 rtx lhs = op0, rhs = op1;
2377 coeff0 = double_int_one;
2378 negcoeff1 = double_int_minus_one;
2380 if (GET_CODE (lhs) == NEG)
2382 coeff0 = double_int_minus_one;
2383 lhs = XEXP (lhs, 0);
2385 else if (GET_CODE (lhs) == MULT
2386 && CONST_INT_P (XEXP (lhs, 1)))
2388 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2389 lhs = XEXP (lhs, 0);
2391 else if (GET_CODE (lhs) == ASHIFT
2392 && CONST_INT_P (XEXP (lhs, 1))
2393 && INTVAL (XEXP (lhs, 1)) >= 0
2394 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2396 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2397 lhs = XEXP (lhs, 0);
2400 if (GET_CODE (rhs) == NEG)
2402 negcoeff1 = double_int_one;
2403 rhs = XEXP (rhs, 0);
2405 else if (GET_CODE (rhs) == MULT
2406 && CONST_INT_P (XEXP (rhs, 1)))
2408 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2409 rhs = XEXP (rhs, 0);
2411 else if (GET_CODE (rhs) == ASHIFT
2412 && CONST_INT_P (XEXP (rhs, 1))
2413 && INTVAL (XEXP (rhs, 1)) >= 0
2414 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2416 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2417 negcoeff1 = -negcoeff1;
2418 rhs = XEXP (rhs, 0);
2421 if (rtx_equal_p (lhs, rhs))
2423 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2424 rtx coeff;
2425 double_int val;
2426 bool speed = optimize_function_for_speed_p (cfun);
2428 val = coeff0 + negcoeff1;
2429 coeff = immed_double_int_const (val, mode);
2431 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2432 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2433 ? tem : 0;
2437 /* (a - (-b)) -> (a + b). True even for IEEE. */
2438 if (GET_CODE (op1) == NEG)
2439 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2441 /* (-x - c) may be simplified as (-c - x). */
2442 if (GET_CODE (op0) == NEG
2443 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2445 tem = simplify_unary_operation (NEG, mode, op1, mode);
2446 if (tem)
2447 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2450 /* Don't let a relocatable value get a negative coeff. */
2451 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2452 return simplify_gen_binary (PLUS, mode,
2453 op0,
2454 neg_const_int (mode, op1));
2456 /* (x - (x & y)) -> (x & ~y) */
2457 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2459 if (rtx_equal_p (op0, XEXP (op1, 0)))
2461 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2462 GET_MODE (XEXP (op1, 1)));
2463 return simplify_gen_binary (AND, mode, op0, tem);
2465 if (rtx_equal_p (op0, XEXP (op1, 1)))
2467 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2468 GET_MODE (XEXP (op1, 0)));
2469 return simplify_gen_binary (AND, mode, op0, tem);
2473 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2474 by reversing the comparison code if valid. */
2475 if (STORE_FLAG_VALUE == 1
2476 && trueop0 == const1_rtx
2477 && COMPARISON_P (op1)
2478 && (reversed = reversed_comparison (op1, mode)))
2479 return reversed;
2481 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2482 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2483 && GET_CODE (op1) == MULT
2484 && GET_CODE (XEXP (op1, 0)) == NEG)
2486 rtx in1, in2;
2488 in1 = XEXP (XEXP (op1, 0), 0);
2489 in2 = XEXP (op1, 1);
2490 return simplify_gen_binary (PLUS, mode,
2491 simplify_gen_binary (MULT, mode,
2492 in1, in2),
2493 op0);
2496 /* Canonicalize (minus (neg A) (mult B C)) to
2497 (minus (mult (neg B) C) A). */
2498 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2499 && GET_CODE (op1) == MULT
2500 && GET_CODE (op0) == NEG)
2502 rtx in1, in2;
2504 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2505 in2 = XEXP (op1, 1);
2506 return simplify_gen_binary (MINUS, mode,
2507 simplify_gen_binary (MULT, mode,
2508 in1, in2),
2509 XEXP (op0, 0));
2512 /* If one of the operands is a PLUS or a MINUS, see if we can
2513 simplify this by the associative law. This will, for example,
2514 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2515 Don't use the associative law for floating point.
2516 The inaccuracy makes it nonassociative,
2517 and subtle programs can break if operations are associated. */
2519 if (INTEGRAL_MODE_P (mode)
2520 && (plus_minus_operand_p (op0)
2521 || plus_minus_operand_p (op1))
2522 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2523 return tem;
2524 break;
2526 case MULT:
2527 if (trueop1 == constm1_rtx)
2528 return simplify_gen_unary (NEG, mode, op0, mode);
2530 if (GET_CODE (op0) == NEG)
2532 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2533 /* If op1 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could, through simplify_associative_operation, move
2536 the NEG around again and recurse endlessly. */
2537 if (temp
2538 && GET_CODE (op1) == MULT
2539 && GET_CODE (temp) == MULT
2540 && XEXP (op1, 0) == XEXP (temp, 0)
2541 && GET_CODE (XEXP (temp, 1)) == NEG
2542 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2543 temp = NULL_RTX;
2544 if (temp)
2545 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2547 if (GET_CODE (op1) == NEG)
2549 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2550 /* If op0 is a MULT as well and simplify_unary_operation
2551 just moved the NEG to the second operand, simplify_gen_binary
2552 below could, through simplify_associative_operation, move
2553 the NEG around again and recurse endlessly. */
2554 if (temp
2555 && GET_CODE (op0) == MULT
2556 && GET_CODE (temp) == MULT
2557 && XEXP (op0, 0) == XEXP (temp, 0)
2558 && GET_CODE (XEXP (temp, 1)) == NEG
2559 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2560 temp = NULL_RTX;
2561 if (temp)
2562 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2565 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2566 x is NaN, since x * 0 is then also NaN. Nor is it valid
2567 when the mode has signed zeros, since multiplying a negative
2568 number by 0 will give -0, not 0. */
2569 if (!HONOR_NANS (mode)
2570 && !HONOR_SIGNED_ZEROS (mode)
2571 && trueop1 == CONST0_RTX (mode)
2572 && ! side_effects_p (op0))
2573 return op1;
2575 /* In IEEE floating point, x*1 is not equivalent to x for
2576 signalling NaNs. */
2577 if (!HONOR_SNANS (mode)
2578 && trueop1 == CONST1_RTX (mode))
2579 return op0;
2581 /* Convert multiply by constant power of two into shift unless
2582 we are still generating RTL. This test is a kludge. */
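/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */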
2583 if (CONST_INT_P (trueop1)
2584 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2585 /* If the mode is larger than the host word size, and the
2586 uppermost bit is set, then this isn't a power of two due
2587 to implicit sign extension. */
2588 && (width <= HOST_BITS_PER_WIDE_INT
2589 || val != HOST_BITS_PER_WIDE_INT - 1))
2590 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2592 /* Likewise for multipliers wider than a word. */
2593 if (CONST_DOUBLE_AS_INT_P (trueop1)
2594 && GET_MODE (op0) == mode
2595 && CONST_DOUBLE_LOW (trueop1) == 0
2596 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2597 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2598 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2599 return simplify_gen_binary (ASHIFT, mode, op0,
2600 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2602 /* x*2 is x+x and x*(-1) is -x */
2603 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2604 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2605 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2606 && GET_MODE (op0) == mode)
2608 REAL_VALUE_TYPE d;
2609 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2611 if (REAL_VALUES_EQUAL (d, dconst2))
2612 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2614 if (!HONOR_SNANS (mode)
2615 && REAL_VALUES_EQUAL (d, dconstm1))
2616 return simplify_gen_unary (NEG, mode, op0, mode);
2619 /* Optimize -x * -x as x * x. */
2620 if (FLOAT_MODE_P (mode)
2621 && GET_CODE (op0) == NEG
2622 && GET_CODE (op1) == NEG
2623 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2624 && !side_effects_p (XEXP (op0, 0)))
2625 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2627 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2628 if (SCALAR_FLOAT_MODE_P (mode)
2629 && GET_CODE (op0) == ABS
2630 && GET_CODE (op1) == ABS
2631 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2632 && !side_effects_p (XEXP (op0, 0)))
2633 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2635 /* Reassociate multiplication, but for floating point MULTs
2636 only when the user specifies unsafe math optimizations. */
2637 if (! FLOAT_MODE_P (mode)
2638 || flag_unsafe_math_optimizations)
2640 tem = simplify_associative_operation (code, mode, op0, op1);
2641 if (tem)
2642 return tem;
2644 break;
2646 case IOR:
2647 if (trueop1 == CONST0_RTX (mode))
2648 return op0;
2649 if (INTEGRAL_MODE_P (mode)
2650 && trueop1 == CONSTM1_RTX (mode)
2651 && !side_effects_p (op0))
2652 return op1;
2653 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2654 return op0;
2655 /* A | (~A) -> -1 */
2656 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2657 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2658 && ! side_effects_p (op0)
2659 && SCALAR_INT_MODE_P (mode))
2660 return constm1_rtx;
2662 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2663 if (CONST_INT_P (op1)
2664 && HWI_COMPUTABLE_MODE_P (mode)
2665 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2666 && !side_effects_p (op0))
2667 return op1;
2669 /* Canonicalize (X & C1) | C2. */
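/* Illustrations in QImode: (ior (and X 0x0f) 0xff) becomes 0xff,
   (ior (and X 0xf0) 0x0f) becomes (ior X 0x0f), and
   (ior (and X 0x3c) 0x0f) becomes (ior (and X 0x30) 0x0f).  */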
2670 if (GET_CODE (op0) == AND
2671 && CONST_INT_P (trueop1)
2672 && CONST_INT_P (XEXP (op0, 1)))
2674 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2675 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2676 HOST_WIDE_INT c2 = INTVAL (trueop1);
2678 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2679 if ((c1 & c2) == c1
2680 && !side_effects_p (XEXP (op0, 0)))
2681 return trueop1;
2683 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2684 if (((c1|c2) & mask) == mask)
2685 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2687 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2688 if (((c1 & ~c2) & mask) != (c1 & mask))
2690 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2691 gen_int_mode (c1 & ~c2, mode));
2692 return simplify_gen_binary (IOR, mode, tem, op1);
2696 /* Convert (A & B) | A to A. */
2697 if (GET_CODE (op0) == AND
2698 && (rtx_equal_p (XEXP (op0, 0), op1)
2699 || rtx_equal_p (XEXP (op0, 1), op1))
2700 && ! side_effects_p (XEXP (op0, 0))
2701 && ! side_effects_p (XEXP (op0, 1)))
2702 return op1;
2704 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2705 mode size to (rotate A CX). */
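/* For example, in SImode (ior (ashift A 24) (lshiftrt A 8)) is
   recognized as (rotate A 24), since 24 + 8 equals the mode precision.  */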
2707 if (GET_CODE (op1) == ASHIFT
2708 || GET_CODE (op1) == SUBREG)
2710 opleft = op1;
2711 opright = op0;
2713 else
2715 opright = op1;
2716 opleft = op0;
2719 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2720 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2721 && CONST_INT_P (XEXP (opleft, 1))
2722 && CONST_INT_P (XEXP (opright, 1))
2723 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2724 == GET_MODE_PRECISION (mode)))
2725 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2727 /* Same, but for ashift that has been "simplified" to a wider mode
2728 by simplify_shift_const. */
2730 if (GET_CODE (opleft) == SUBREG
2731 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2732 && GET_CODE (opright) == LSHIFTRT
2733 && GET_CODE (XEXP (opright, 0)) == SUBREG
2734 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2735 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2736 && (GET_MODE_SIZE (GET_MODE (opleft))
2737 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2738 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2739 SUBREG_REG (XEXP (opright, 0)))
2740 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2741 && CONST_INT_P (XEXP (opright, 1))
2742 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2743 == GET_MODE_PRECISION (mode)))
2744 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2745 XEXP (SUBREG_REG (opleft), 1));
2747 /* If we have (ior (and X C1) C2), simplify this by making
2748 C1 as small as possible if C1 actually changes. */
2749 if (CONST_INT_P (op1)
2750 && (HWI_COMPUTABLE_MODE_P (mode)
2751 || INTVAL (op1) > 0)
2752 && GET_CODE (op0) == AND
2753 && CONST_INT_P (XEXP (op0, 1))
2754 && CONST_INT_P (op1)
2755 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2756 return simplify_gen_binary (IOR, mode,
2757 simplify_gen_binary
2758 (AND, mode, XEXP (op0, 0),
2759 GEN_INT (UINTVAL (XEXP (op0, 1))
2760 & ~UINTVAL (op1))),
2761 op1);
2763 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2764 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2765 the PLUS does not affect any of the bits in OP1: then we can do
2766 the IOR as a PLUS and we can associate. This is valid if OP1
2767 can be safely shifted left C bits. */
2768 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2769 && GET_CODE (XEXP (op0, 0)) == PLUS
2770 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2771 && CONST_INT_P (XEXP (op0, 1))
2772 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2774 int count = INTVAL (XEXP (op0, 1));
2775 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2777 if (mask >> count == INTVAL (trueop1)
2778 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2779 return simplify_gen_binary (ASHIFTRT, mode,
2780 plus_constant (mode, XEXP (op0, 0),
2781 mask),
2782 XEXP (op0, 1));
2785 tem = simplify_associative_operation (code, mode, op0, op1);
2786 if (tem)
2787 return tem;
2788 break;
2790 case XOR:
2791 if (trueop1 == CONST0_RTX (mode))
2792 return op0;
2793 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2794 return simplify_gen_unary (NOT, mode, op0, mode);
2795 if (rtx_equal_p (trueop0, trueop1)
2796 && ! side_effects_p (op0)
2797 && GET_MODE_CLASS (mode) != MODE_CC)
2798 return CONST0_RTX (mode);
2800 /* Canonicalize XOR of the most significant bit to PLUS. */
2801 if (CONST_SCALAR_INT_P (op1)
2802 && mode_signbit_p (mode, op1))
2803 return simplify_gen_binary (PLUS, mode, op0, op1);
2804 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2805 if (CONST_SCALAR_INT_P (op1)
2806 && GET_CODE (op0) == PLUS
2807 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2808 && mode_signbit_p (mode, XEXP (op0, 1)))
2809 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2810 simplify_gen_binary (XOR, mode, op1,
2811 XEXP (op0, 1)));
2813 /* If we are XORing two things that have no bits in common,
2814 convert them into an IOR. This helps to detect rotation encoded
2815 using those methods and possibly other simplifications. */
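/* For example, (xor (and X 0xf0) (and Y 0x0f)) has no nonzero bits in
   common between its operands, so it is rewritten as
   (ior (and X 0xf0) (and Y 0x0f)).  */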
2817 if (HWI_COMPUTABLE_MODE_P (mode)
2818 && (nonzero_bits (op0, mode)
2819 & nonzero_bits (op1, mode)) == 0)
2820 return (simplify_gen_binary (IOR, mode, op0, op1));
2822 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2823 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2824 (NOT y). */
2826 int num_negated = 0;
2828 if (GET_CODE (op0) == NOT)
2829 num_negated++, op0 = XEXP (op0, 0);
2830 if (GET_CODE (op1) == NOT)
2831 num_negated++, op1 = XEXP (op1, 0);
2833 if (num_negated == 2)
2834 return simplify_gen_binary (XOR, mode, op0, op1);
2835 else if (num_negated == 1)
2836 return simplify_gen_unary (NOT, mode,
2837 simplify_gen_binary (XOR, mode, op0, op1),
2838 mode);
2841 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2842 correspond to a machine insn or result in further simplifications
2843 if B is a constant. */
2845 if (GET_CODE (op0) == AND
2846 && rtx_equal_p (XEXP (op0, 1), op1)
2847 && ! side_effects_p (op1))
2848 return simplify_gen_binary (AND, mode,
2849 simplify_gen_unary (NOT, mode,
2850 XEXP (op0, 0), mode),
2851 op1);
2853 else if (GET_CODE (op0) == AND
2854 && rtx_equal_p (XEXP (op0, 0), op1)
2855 && ! side_effects_p (op1))
2856 return simplify_gen_binary (AND, mode,
2857 simplify_gen_unary (NOT, mode,
2858 XEXP (op0, 1), mode),
2859 op1);
2861 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2862 we can transform like this:
2863 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2864 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2865 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2866 Attempt a few simplifications when B and C are both constants. */
2867 if (GET_CODE (op0) == AND
2868 && CONST_INT_P (op1)
2869 && CONST_INT_P (XEXP (op0, 1)))
2871 rtx a = XEXP (op0, 0);
2872 rtx b = XEXP (op0, 1);
2873 rtx c = op1;
2874 HOST_WIDE_INT bval = INTVAL (b);
2875 HOST_WIDE_INT cval = INTVAL (c);
2877 rtx na_c
2878 = simplify_binary_operation (AND, mode,
2879 simplify_gen_unary (NOT, mode, a, mode),
2881 if ((~cval & bval) == 0)
2883 /* Try to simplify ~A&C | ~B&C. */
2884 if (na_c != NULL_RTX)
2885 return simplify_gen_binary (IOR, mode, na_c,
2886 GEN_INT (~bval & cval));
2888 else
2890 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2891 if (na_c == const0_rtx)
2893 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2894 GEN_INT (~cval & bval));
2895 return simplify_gen_binary (IOR, mode, a_nc_b,
2896 GEN_INT (~bval & cval));
2901 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2902 comparison if STORE_FLAG_VALUE is 1. */
2903 if (STORE_FLAG_VALUE == 1
2904 && trueop1 == const1_rtx
2905 && COMPARISON_P (op0)
2906 && (reversed = reversed_comparison (op0, mode)))
2907 return reversed;
2909 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2910 is (lt foo (const_int 0)), so we can perform the above
2911 simplification if STORE_FLAG_VALUE is 1. */
2913 if (STORE_FLAG_VALUE == 1
2914 && trueop1 == const1_rtx
2915 && GET_CODE (op0) == LSHIFTRT
2916 && CONST_INT_P (XEXP (op0, 1))
2917 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2918 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2920 /* (xor (comparison foo bar) (const_int sign-bit))
2921 when STORE_FLAG_VALUE is the sign bit. */
2922 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2923 && trueop1 == const_true_rtx
2924 && COMPARISON_P (op0)
2925 && (reversed = reversed_comparison (op0, mode)))
2926 return reversed;
2928 tem = simplify_associative_operation (code, mode, op0, op1);
2929 if (tem)
2930 return tem;
2931 break;
2933 case AND:
2934 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2935 return trueop1;
2936 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2937 return op0;
2938 if (HWI_COMPUTABLE_MODE_P (mode))
2940 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2941 HOST_WIDE_INT nzop1;
2942 if (CONST_INT_P (trueop1))
2944 HOST_WIDE_INT val1 = INTVAL (trueop1);
2945 /* If we are turning off bits already known off in OP0, we need
2946 not do an AND. */
2947 if ((nzop0 & ~val1) == 0)
2948 return op0;
2950 nzop1 = nonzero_bits (trueop1, mode);
2951 /* If we are clearing all the nonzero bits, the result is zero. */
2952 if ((nzop1 & nzop0) == 0
2953 && !side_effects_p (op0) && !side_effects_p (op1))
2954 return CONST0_RTX (mode);
2956 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2957 && GET_MODE_CLASS (mode) != MODE_CC)
2958 return op0;
2959 /* A & (~A) -> 0 */
2960 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2961 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2962 && ! side_effects_p (op0)
2963 && GET_MODE_CLASS (mode) != MODE_CC)
2964 return CONST0_RTX (mode);
2966 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2967 there are no nonzero bits of C outside of X's mode. */
2968 if ((GET_CODE (op0) == SIGN_EXTEND
2969 || GET_CODE (op0) == ZERO_EXTEND)
2970 && CONST_INT_P (trueop1)
2971 && HWI_COMPUTABLE_MODE_P (mode)
2972 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2973 & UINTVAL (trueop1)) == 0)
2975 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2976 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2977 gen_int_mode (INTVAL (trueop1),
2978 imode));
2979 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2982 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2983 we might be able to further simplify the AND with X and potentially
2984 remove the truncation altogether. */
2985 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2987 rtx x = XEXP (op0, 0);
2988 enum machine_mode xmode = GET_MODE (x);
2989 tem = simplify_gen_binary (AND, xmode, x,
2990 gen_int_mode (INTVAL (trueop1), xmode));
2991 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2994 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2995 if (GET_CODE (op0) == IOR
2996 && CONST_INT_P (trueop1)
2997 && CONST_INT_P (XEXP (op0, 1)))
2999 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3000 return simplify_gen_binary (IOR, mode,
3001 simplify_gen_binary (AND, mode,
3002 XEXP (op0, 0), op1),
3003 gen_int_mode (tmp, mode));
3006 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3007 insn (and may simplify more). */
3008 if (GET_CODE (op0) == XOR
3009 && rtx_equal_p (XEXP (op0, 0), op1)
3010 && ! side_effects_p (op1))
3011 return simplify_gen_binary (AND, mode,
3012 simplify_gen_unary (NOT, mode,
3013 XEXP (op0, 1), mode),
3014 op1);
3016 if (GET_CODE (op0) == XOR
3017 && rtx_equal_p (XEXP (op0, 1), op1)
3018 && ! side_effects_p (op1))
3019 return simplify_gen_binary (AND, mode,
3020 simplify_gen_unary (NOT, mode,
3021 XEXP (op0, 0), mode),
3022 op1);
3024 /* Similarly for (~(A ^ B)) & A. */
3025 if (GET_CODE (op0) == NOT
3026 && GET_CODE (XEXP (op0, 0)) == XOR
3027 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3028 && ! side_effects_p (op1))
3029 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3031 if (GET_CODE (op0) == NOT
3032 && GET_CODE (XEXP (op0, 0)) == XOR
3033 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3034 && ! side_effects_p (op1))
3035 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3037 /* Convert (A | B) & A to A. */
3038 if (GET_CODE (op0) == IOR
3039 && (rtx_equal_p (XEXP (op0, 0), op1)
3040 || rtx_equal_p (XEXP (op0, 1), op1))
3041 && ! side_effects_p (XEXP (op0, 0))
3042 && ! side_effects_p (XEXP (op0, 1)))
3043 return op1;
3045 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3046 ((A & N) + B) & M -> (A + B) & M
3047 Similarly if (N & M) == 0,
3048 ((A | N) + B) & M -> (A + B) & M
3049 and for - instead of + and/or ^ instead of |.
3050 Also, if (N & M) == 0, then
3051 (A +- N) & M -> A & M. */
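/* For example, with M == 0x0f: ((A & 0xff) + B) & 0x0f becomes
   (A + B) & 0x0f because 0xff covers all bits of the mask, and
   (A + 0x30) & 0x0f becomes A & 0x0f because 0x30 & 0x0f == 0.  */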
3052 if (CONST_INT_P (trueop1)
3053 && HWI_COMPUTABLE_MODE_P (mode)
3054 && ~UINTVAL (trueop1)
3055 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3056 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3058 rtx pmop[2];
3059 int which;
3061 pmop[0] = XEXP (op0, 0);
3062 pmop[1] = XEXP (op0, 1);
3064 if (CONST_INT_P (pmop[1])
3065 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3066 return simplify_gen_binary (AND, mode, pmop[0], op1);
3068 for (which = 0; which < 2; which++)
3070 tem = pmop[which];
3071 switch (GET_CODE (tem))
3073 case AND:
3074 if (CONST_INT_P (XEXP (tem, 1))
3075 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3076 == UINTVAL (trueop1))
3077 pmop[which] = XEXP (tem, 0);
3078 break;
3079 case IOR:
3080 case XOR:
3081 if (CONST_INT_P (XEXP (tem, 1))
3082 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3083 pmop[which] = XEXP (tem, 0);
3084 break;
3085 default:
3086 break;
3090 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3092 tem = simplify_gen_binary (GET_CODE (op0), mode,
3093 pmop[0], pmop[1]);
3094 return simplify_gen_binary (code, mode, tem, op1);
3098 /* (and X (ior (not X) Y)) -> (and X Y) */
3099 if (GET_CODE (op1) == IOR
3100 && GET_CODE (XEXP (op1, 0)) == NOT
3101 && op0 == XEXP (XEXP (op1, 0), 0))
3102 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3104 /* (and (ior (not X) Y) X) -> (and X Y) */
3105 if (GET_CODE (op0) == IOR
3106 && GET_CODE (XEXP (op0, 0)) == NOT
3107 && op1 == XEXP (XEXP (op0, 0), 0))
3108 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3110 tem = simplify_associative_operation (code, mode, op0, op1);
3111 if (tem)
3112 return tem;
3113 break;
3115 case UDIV:
3116 /* 0/x is 0 (or x&0 if x has side-effects). */
3117 if (trueop0 == CONST0_RTX (mode))
3119 if (side_effects_p (op1))
3120 return simplify_gen_binary (AND, mode, op1, trueop0);
3121 return trueop0;
3123 /* x/1 is x. */
3124 if (trueop1 == CONST1_RTX (mode))
3126 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3127 if (tem)
3128 return tem;
3130 /* Convert divide by power of two into shift. */
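/* For example, (udiv X 16) becomes (lshiftrt X 4).  */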
3131 if (CONST_INT_P (trueop1)
3132 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3133 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3134 break;
3136 case DIV:
3137 /* Handle floating point and integers separately. */
3138 if (SCALAR_FLOAT_MODE_P (mode))
3140 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3141 safe for modes with NaNs, since 0.0 / 0.0 will then be
3142 NaN rather than 0.0. Nor is it safe for modes with signed
3143 zeros, since dividing 0 by a negative number gives -0.0. */
3144 if (trueop0 == CONST0_RTX (mode)
3145 && !HONOR_NANS (mode)
3146 && !HONOR_SIGNED_ZEROS (mode)
3147 && ! side_effects_p (op1))
3148 return op0;
3149 /* x/1.0 is x. */
3150 if (trueop1 == CONST1_RTX (mode)
3151 && !HONOR_SNANS (mode))
3152 return op0;
3154 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3155 && trueop1 != CONST0_RTX (mode))
3157 REAL_VALUE_TYPE d;
3158 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3160 /* x/-1.0 is -x. */
3161 if (REAL_VALUES_EQUAL (d, dconstm1)
3162 && !HONOR_SNANS (mode))
3163 return simplify_gen_unary (NEG, mode, op0, mode);
3165 /* Change FP division by a constant into multiplication.
3166 Only do this with -freciprocal-math. */
3167 if (flag_reciprocal_math
3168 && !REAL_VALUES_EQUAL (d, dconst0))
3170 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3171 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3172 return simplify_gen_binary (MULT, mode, op0, tem);
3176 else if (SCALAR_INT_MODE_P (mode))
3178 /* 0/x is 0 (or x&0 if x has side-effects). */
3179 if (trueop0 == CONST0_RTX (mode)
3180 && !cfun->can_throw_non_call_exceptions)
3182 if (side_effects_p (op1))
3183 return simplify_gen_binary (AND, mode, op1, trueop0);
3184 return trueop0;
3186 /* x/1 is x. */
3187 if (trueop1 == CONST1_RTX (mode))
3189 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3190 if (tem)
3191 return tem;
3193 /* x/-1 is -x. */
3194 if (trueop1 == constm1_rtx)
3196 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3197 if (x)
3198 return simplify_gen_unary (NEG, mode, x, mode);
3201 break;
3203 case UMOD:
3204 /* 0%x is 0 (or x&0 if x has side-effects). */
3205 if (trueop0 == CONST0_RTX (mode))
3207 if (side_effects_p (op1))
3208 return simplify_gen_binary (AND, mode, op1, trueop0);
3209 return trueop0;
3211 /* x%1 is 0 (or x&0 if x has side-effects). */
3212 if (trueop1 == CONST1_RTX (mode))
3214 if (side_effects_p (op0))
3215 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3216 return CONST0_RTX (mode);
3218 /* Implement modulus by power of two as AND. */
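/* For example, (umod X 8) becomes (and X 7).  */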
3219 if (CONST_INT_P (trueop1)
3220 && exact_log2 (UINTVAL (trueop1)) > 0)
3221 return simplify_gen_binary (AND, mode, op0,
3222 GEN_INT (INTVAL (op1) - 1));
3223 break;
3225 case MOD:
3226 /* 0%x is 0 (or x&0 if x has side-effects). */
3227 if (trueop0 == CONST0_RTX (mode))
3229 if (side_effects_p (op1))
3230 return simplify_gen_binary (AND, mode, op1, trueop0);
3231 return trueop0;
3233 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3234 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3236 if (side_effects_p (op0))
3237 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3238 return CONST0_RTX (mode);
3240 break;
3242 case ROTATERT:
3243 case ROTATE:
3244 case ASHIFTRT:
3245 if (trueop1 == CONST0_RTX (mode))
3246 return op0;
3247 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3248 return op0;
3249 /* Rotating ~0 always results in ~0. */
3250 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3251 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3252 && ! side_effects_p (op1))
3253 return op0;
3254 canonicalize_shift:
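/* When the target truncates shift counts, reduce an out-of-range
   constant count modulo the mode bitsize, e.g. (ashift X 33) in
   SImode becomes (ashift X 1).  */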
3255 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3257 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3258 if (val != INTVAL (op1))
3259 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3261 break;
3263 case ASHIFT:
3264 case SS_ASHIFT:
3265 case US_ASHIFT:
3266 if (trueop1 == CONST0_RTX (mode))
3267 return op0;
3268 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3269 return op0;
3270 goto canonicalize_shift;
3272 case LSHIFTRT:
3273 if (trueop1 == CONST0_RTX (mode))
3274 return op0;
3275 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3276 return op0;
3277 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
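/* This holds when CLZ of zero is defined to be the mode precision and
   C is log2 of that precision: for nonzero X the CLZ result is below
   the precision, so the shifted value is 1 exactly when X is zero.  */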
3278 if (GET_CODE (op0) == CLZ
3279 && CONST_INT_P (trueop1)
3280 && STORE_FLAG_VALUE == 1
3281 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3283 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3284 unsigned HOST_WIDE_INT zero_val = 0;
3286 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3287 && zero_val == GET_MODE_PRECISION (imode)
3288 && INTVAL (trueop1) == exact_log2 (zero_val))
3289 return simplify_gen_relational (EQ, mode, imode,
3290 XEXP (op0, 0), const0_rtx);
3292 goto canonicalize_shift;
3294 case SMIN:
3295 if (width <= HOST_BITS_PER_WIDE_INT
3296 && mode_signbit_p (mode, trueop1)
3297 && ! side_effects_p (op0))
3298 return op1;
3299 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3300 return op0;
3301 tem = simplify_associative_operation (code, mode, op0, op1);
3302 if (tem)
3303 return tem;
3304 break;
3306 case SMAX:
3307 if (width <= HOST_BITS_PER_WIDE_INT
3308 && CONST_INT_P (trueop1)
3309 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3310 && ! side_effects_p (op0))
3311 return op1;
3312 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3313 return op0;
3314 tem = simplify_associative_operation (code, mode, op0, op1);
3315 if (tem)
3316 return tem;
3317 break;
3319 case UMIN:
3320 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3321 return op1;
3322 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3323 return op0;
3324 tem = simplify_associative_operation (code, mode, op0, op1);
3325 if (tem)
3326 return tem;
3327 break;
3329 case UMAX:
3330 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3331 return op1;
3332 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3333 return op0;
3334 tem = simplify_associative_operation (code, mode, op0, op1);
3335 if (tem)
3336 return tem;
3337 break;
3339 case SS_PLUS:
3340 case US_PLUS:
3341 case SS_MINUS:
3342 case US_MINUS:
3343 case SS_MULT:
3344 case US_MULT:
3345 case SS_DIV:
3346 case US_DIV:
3347 /* ??? There are simplifications that can be done. */
3348 return 0;
3350 case VEC_SELECT:
3351 if (!VECTOR_MODE_P (mode))
3353 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3354 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3355 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3356 gcc_assert (XVECLEN (trueop1, 0) == 1);
3357 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3359 if (GET_CODE (trueop0) == CONST_VECTOR)
3360 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3361 (trueop1, 0, 0)));
3363 /* Extract a scalar element from a nested VEC_SELECT expression
3364 (with optional nested VEC_CONCAT expression). Some targets
3365 (i386) extract scalar element from a vector using chain of
3366 nested VEC_SELECT expressions. When input operand is a memory
3367 operand, this operation can be simplified to a simple scalar
3368 load from a suitably offset memory address. */
3369 if (GET_CODE (trueop0) == VEC_SELECT)
3371 rtx op0 = XEXP (trueop0, 0);
3372 rtx op1 = XEXP (trueop0, 1);
3374 enum machine_mode opmode = GET_MODE (op0);
3375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3376 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3378 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3379 int elem;
3381 rtvec vec;
3382 rtx tmp_op, tmp;
3384 gcc_assert (GET_CODE (op1) == PARALLEL);
3385 gcc_assert (i < n_elts);
3387 /* Select the element pointed to by the nested selector. */
3388 elem = INTVAL (XVECEXP (op1, 0, i));
3390 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3391 if (GET_CODE (op0) == VEC_CONCAT)
3393 rtx op00 = XEXP (op0, 0);
3394 rtx op01 = XEXP (op0, 1);
3396 enum machine_mode mode00, mode01;
3397 int n_elts00, n_elts01;
3399 mode00 = GET_MODE (op00);
3400 mode01 = GET_MODE (op01);
3402 /* Find out number of elements of each operand. */
3403 if (VECTOR_MODE_P (mode00))
3405 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3406 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3408 else
3409 n_elts00 = 1;
3411 if (VECTOR_MODE_P (mode01))
3413 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3414 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3416 else
3417 n_elts01 = 1;
3419 gcc_assert (n_elts == n_elts00 + n_elts01);
3421 /* Select correct operand of VEC_CONCAT
3422 and adjust selector. */
3423 if (elem < n_elts01)
3424 tmp_op = op00;
3425 else
3427 tmp_op = op01;
3428 elem -= n_elts00;
3431 else
3432 tmp_op = op0;
3434 vec = rtvec_alloc (1);
3435 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3437 tmp = gen_rtx_fmt_ee (code, mode,
3438 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3439 return tmp;
3441 if (GET_CODE (trueop0) == VEC_DUPLICATE
3442 && GET_MODE (XEXP (trueop0, 0)) == mode)
3443 return XEXP (trueop0, 0);
3445 else
3447 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3448 gcc_assert (GET_MODE_INNER (mode)
3449 == GET_MODE_INNER (GET_MODE (trueop0)));
3450 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3452 if (GET_CODE (trueop0) == CONST_VECTOR)
3454 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3455 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3456 rtvec v = rtvec_alloc (n_elts);
3457 unsigned int i;
3459 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3460 for (i = 0; i < n_elts; i++)
3462 rtx x = XVECEXP (trueop1, 0, i);
3464 gcc_assert (CONST_INT_P (x));
3465 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3466 INTVAL (x));
3469 return gen_rtx_CONST_VECTOR (mode, v);
3472 /* Recognize the identity. */
3473 if (GET_MODE (trueop0) == mode)
3475 bool maybe_ident = true;
3476 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3478 rtx j = XVECEXP (trueop1, 0, i);
3479 if (!CONST_INT_P (j) || INTVAL (j) != i)
3481 maybe_ident = false;
3482 break;
3485 if (maybe_ident)
3486 return trueop0;
3489 /* If we build {a,b} then permute it, build the result directly. */
3490 if (XVECLEN (trueop1, 0) == 2
3491 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3492 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3493 && GET_CODE (trueop0) == VEC_CONCAT
3494 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3495 && GET_MODE (XEXP (trueop0, 0)) == mode
3496 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3497 && GET_MODE (XEXP (trueop0, 1)) == mode)
3499 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3500 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3501 rtx subop0, subop1;
3503 gcc_assert (i0 < 4 && i1 < 4);
3504 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3505 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3507 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3510 if (XVECLEN (trueop1, 0) == 2
3511 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3512 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3513 && GET_CODE (trueop0) == VEC_CONCAT
3514 && GET_MODE (trueop0) == mode)
3516 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3517 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3518 rtx subop0, subop1;
3520 gcc_assert (i0 < 2 && i1 < 2);
3521 subop0 = XEXP (trueop0, i0);
3522 subop1 = XEXP (trueop0, i1);
3524 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3528 if (XVECLEN (trueop1, 0) == 1
3529 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3530 && GET_CODE (trueop0) == VEC_CONCAT)
3532 rtx vec = trueop0;
3533 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3535 /* Try to find the element in the VEC_CONCAT. */
3536 while (GET_MODE (vec) != mode
3537 && GET_CODE (vec) == VEC_CONCAT)
3539 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3540 if (offset < vec_size)
3541 vec = XEXP (vec, 0);
3542 else
3544 offset -= vec_size;
3545 vec = XEXP (vec, 1);
3547 vec = avoid_constant_pool_reference (vec);
3550 if (GET_MODE (vec) == mode)
3551 return vec;
3554 return 0;
3555 case VEC_CONCAT:
3557 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3558 ? GET_MODE (trueop0)
3559 : GET_MODE_INNER (mode));
3560 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3561 ? GET_MODE (trueop1)
3562 : GET_MODE_INNER (mode));
3564 gcc_assert (VECTOR_MODE_P (mode));
3565 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3566 == GET_MODE_SIZE (mode));
3568 if (VECTOR_MODE_P (op0_mode))
3569 gcc_assert (GET_MODE_INNER (mode)
3570 == GET_MODE_INNER (op0_mode));
3571 else
3572 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3574 if (VECTOR_MODE_P (op1_mode))
3575 gcc_assert (GET_MODE_INNER (mode)
3576 == GET_MODE_INNER (op1_mode));
3577 else
3578 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3580 if ((GET_CODE (trueop0) == CONST_VECTOR
3581 || CONST_SCALAR_INT_P (trueop0)
3582 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3583 && (GET_CODE (trueop1) == CONST_VECTOR
3584 || CONST_SCALAR_INT_P (trueop1)
3585 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3587 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3588 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3589 rtvec v = rtvec_alloc (n_elts);
3590 unsigned int i;
3591 unsigned in_n_elts = 1;
3593 if (VECTOR_MODE_P (op0_mode))
3594 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3595 for (i = 0; i < n_elts; i++)
3597 if (i < in_n_elts)
3599 if (!VECTOR_MODE_P (op0_mode))
3600 RTVEC_ELT (v, i) = trueop0;
3601 else
3602 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3604 else
3606 if (!VECTOR_MODE_P (op1_mode))
3607 RTVEC_ELT (v, i) = trueop1;
3608 else
3609 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3610 i - in_n_elts);
3614 return gen_rtx_CONST_VECTOR (mode, v);
3617 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3618 if (GET_CODE (trueop0) == VEC_SELECT
3619 && GET_CODE (trueop1) == VEC_SELECT
3620 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3622 rtx par0 = XEXP (trueop0, 1);
3623 rtx par1 = XEXP (trueop1, 1);
3624 int len0 = XVECLEN (par0, 0);
3625 int len1 = XVECLEN (par1, 0);
3626 rtvec vec = rtvec_alloc (len0 + len1);
3627 for (int i = 0; i < len0; i++)
3628 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3629 for (int i = 0; i < len1; i++)
3630 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3631 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3632 gen_rtx_PARALLEL (VOIDmode, vec));
3635 return 0;
3637 default:
3638 gcc_unreachable ();
3641 return 0;
3645 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3646 rtx op0, rtx op1)
3648 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3649 HOST_WIDE_INT val;
3650 unsigned int width = GET_MODE_PRECISION (mode);
3652 if (VECTOR_MODE_P (mode)
3653 && code != VEC_CONCAT
3654 && GET_CODE (op0) == CONST_VECTOR
3655 && GET_CODE (op1) == CONST_VECTOR)
3657 unsigned n_elts = GET_MODE_NUNITS (mode);
3658 enum machine_mode op0mode = GET_MODE (op0);
3659 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3660 enum machine_mode op1mode = GET_MODE (op1);
3661 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3662 rtvec v = rtvec_alloc (n_elts);
3663 unsigned int i;
3665 gcc_assert (op0_n_elts == n_elts);
3666 gcc_assert (op1_n_elts == n_elts);
3667 for (i = 0; i < n_elts; i++)
3669 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3670 CONST_VECTOR_ELT (op0, i),
3671 CONST_VECTOR_ELT (op1, i));
3672 if (!x)
3673 return 0;
3674 RTVEC_ELT (v, i) = x;
3677 return gen_rtx_CONST_VECTOR (mode, v);
3680 if (VECTOR_MODE_P (mode)
3681 && code == VEC_CONCAT
3682 && (CONST_SCALAR_INT_P (op0)
3683 || GET_CODE (op0) == CONST_FIXED
3684 || CONST_DOUBLE_AS_FLOAT_P (op0))
3685 && (CONST_SCALAR_INT_P (op1)
3686 || CONST_DOUBLE_AS_FLOAT_P (op1)
3687 || GET_CODE (op1) == CONST_FIXED))
3689 unsigned n_elts = GET_MODE_NUNITS (mode);
3690 rtvec v = rtvec_alloc (n_elts);
3692 gcc_assert (n_elts >= 2);
3693 if (n_elts == 2)
3695 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3696 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3698 RTVEC_ELT (v, 0) = op0;
3699 RTVEC_ELT (v, 1) = op1;
3701 else
3703 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3704 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3705 unsigned i;
3707 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3708 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3709 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3711 for (i = 0; i < op0_n_elts; ++i)
3712 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3713 for (i = 0; i < op1_n_elts; ++i)
3714 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3717 return gen_rtx_CONST_VECTOR (mode, v);
3720 if (SCALAR_FLOAT_MODE_P (mode)
3721 && CONST_DOUBLE_AS_FLOAT_P (op0)
3722 && CONST_DOUBLE_AS_FLOAT_P (op1)
3723 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3725 if (code == AND
3726 || code == IOR
3727 || code == XOR)
3729 long tmp0[4];
3730 long tmp1[4];
3731 REAL_VALUE_TYPE r;
3732 int i;
3734 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3735 GET_MODE (op0));
3736 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3737 GET_MODE (op1));
3738 for (i = 0; i < 4; i++)
3740 switch (code)
3742 case AND:
3743 tmp0[i] &= tmp1[i];
3744 break;
3745 case IOR:
3746 tmp0[i] |= tmp1[i];
3747 break;
3748 case XOR:
3749 tmp0[i] ^= tmp1[i];
3750 break;
3751 default:
3752 gcc_unreachable ();
3755 real_from_target (&r, tmp0, mode);
3756 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3758 else
3760 REAL_VALUE_TYPE f0, f1, value, result;
3761 bool inexact;
3763 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3764 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3765 real_convert (&f0, mode, &f0);
3766 real_convert (&f1, mode, &f1);
3768 if (HONOR_SNANS (mode)
3769 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3770 return 0;
3772 if (code == DIV
3773 && REAL_VALUES_EQUAL (f1, dconst0)
3774 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3775 return 0;
3777 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3778 && flag_trapping_math
3779 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3781 int s0 = REAL_VALUE_NEGATIVE (f0);
3782 int s1 = REAL_VALUE_NEGATIVE (f1);
3784 switch (code)
3786 case PLUS:
3787 /* Inf + -Inf = NaN plus exception. */
3788 if (s0 != s1)
3789 return 0;
3790 break;
3791 case MINUS:
3792 /* Inf - Inf = NaN plus exception. */
3793 if (s0 == s1)
3794 return 0;
3795 break;
3796 case DIV:
3797 /* Inf / Inf = NaN plus exception. */
3798 return 0;
3799 default:
3800 break;
3804 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3805 && flag_trapping_math
3806 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3807 || (REAL_VALUE_ISINF (f1)
3808 && REAL_VALUES_EQUAL (f0, dconst0))))
3809 /* Inf * 0 = NaN plus exception. */
3810 return 0;
3812 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3813 &f0, &f1);
3814 real_convert (&result, mode, &value);
3816 /* Don't constant fold this floating point operation if
3817 the result has overflowed and flag_trapping_math is set. */
3819 if (flag_trapping_math
3820 && MODE_HAS_INFINITIES (mode)
3821 && REAL_VALUE_ISINF (result)
3822 && !REAL_VALUE_ISINF (f0)
3823 && !REAL_VALUE_ISINF (f1))
3824 /* Overflow plus exception. */
3825 return 0;
3827 /* Don't constant fold this floating point operation if the
3828 result may depend upon the run-time rounding mode and
3829 flag_rounding_math is set, or if GCC's software emulation
3830 is unable to accurately represent the result. */
3832 if ((flag_rounding_math
3833 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3834 && (inexact || !real_identical (&result, &value)))
3835 return NULL_RTX;
3837 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3841 /* We can fold some multi-word operations. */
3842 if (GET_MODE_CLASS (mode) == MODE_INT
3843 && width == HOST_BITS_PER_DOUBLE_INT
3844 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3845 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3847 double_int o0, o1, res, tmp;
3848 bool overflow;
3850 o0 = rtx_to_double_int (op0);
3851 o1 = rtx_to_double_int (op1);
3853 switch (code)
3855 case MINUS:
3856 /* A - B == A + (-B). */
3857 o1 = -o1;
3859 /* Fall through.... */
3861 case PLUS:
3862 res = o0 + o1;
3863 break;
3865 case MULT:
3866 res = o0 * o1;
3867 break;
3869 case DIV:
3870 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3871 &tmp, &overflow);
3872 if (overflow)
3873 return 0;
3874 break;
3876 case MOD:
3877 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3878 &res, &overflow);
3879 if (overflow)
3880 return 0;
3881 break;
3883 case UDIV:
3884 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3885 &tmp, &overflow);
3886 if (overflow)
3887 return 0;
3888 break;
3890 case UMOD:
3891 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3892 &res, &overflow);
3893 if (overflow)
3894 return 0;
3895 break;
3897 case AND:
3898 res = o0 & o1;
3899 break;
3901 case IOR:
3902 res = o0 | o1;
3903 break;
3905 case XOR:
3906 res = o0 ^ o1;
3907 break;
3909 case SMIN:
3910 res = o0.smin (o1);
3911 break;
3913 case SMAX:
3914 res = o0.smax (o1);
3915 break;
3917 case UMIN:
3918 res = o0.umin (o1);
3919 break;
3921 case UMAX:
3922 res = o0.umax (o1);
3923 break;
3925 case LSHIFTRT: case ASHIFTRT:
3926 case ASHIFT:
3927 case ROTATE: case ROTATERT:
3929 unsigned HOST_WIDE_INT cnt;
3931 if (SHIFT_COUNT_TRUNCATED)
3933 o1.high = 0;
3934 o1.low &= GET_MODE_PRECISION (mode) - 1;
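/* Shift counts that still do not fit in a host wide integer, or that
   reach the mode precision, are left unfolded by the check below.  */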
3937 if (!o1.fits_uhwi ()
3938 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3939 return 0;
3941 cnt = o1.to_uhwi ();
3942 unsigned short prec = GET_MODE_PRECISION (mode);
3944 if (code == LSHIFTRT || code == ASHIFTRT)
3945 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3946 else if (code == ASHIFT)
3947 res = o0.alshift (cnt, prec);
3948 else if (code == ROTATE)
3949 res = o0.lrotate (cnt, prec);
3950 else /* code == ROTATERT */
3951 res = o0.rrotate (cnt, prec);
3953 break;
3955 default:
3956 return 0;
3959 return immed_double_int_const (res, mode);
3962 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3963 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3965 /* Get the integer argument values in two forms:
3966 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3968 arg0 = INTVAL (op0);
3969 arg1 = INTVAL (op1);
3971 if (width < HOST_BITS_PER_WIDE_INT)
3973 arg0 &= GET_MODE_MASK (mode);
3974 arg1 &= GET_MODE_MASK (mode);
3976 arg0s = arg0;
3977 if (val_signbit_known_set_p (mode, arg0s))
3978 arg0s |= ~GET_MODE_MASK (mode);
3980 arg1s = arg1;
3981 if (val_signbit_known_set_p (mode, arg1s))
3982 arg1s |= ~GET_MODE_MASK (mode);
3984 else
3986 arg0s = arg0;
3987 arg1s = arg1;
3990 /* Compute the value of the arithmetic. */
3992 switch (code)
3994 case PLUS:
3995 val = arg0s + arg1s;
3996 break;
3998 case MINUS:
3999 val = arg0s - arg1s;
4000 break;
4002 case MULT:
4003 val = arg0s * arg1s;
4004 break;
4006 case DIV:
4007 if (arg1s == 0
4008 || ((unsigned HOST_WIDE_INT) arg0s
4009 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4010 && arg1s == -1))
4011 return 0;
4012 val = arg0s / arg1s;
4013 break;
4015 case MOD:
4016 if (arg1s == 0
4017 || ((unsigned HOST_WIDE_INT) arg0s
4018 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4019 && arg1s == -1))
4020 return 0;
4021 val = arg0s % arg1s;
4022 break;
4024 case UDIV:
4025 if (arg1 == 0
4026 || ((unsigned HOST_WIDE_INT) arg0s
4027 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4028 && arg1s == -1))
4029 return 0;
4030 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4031 break;
4033 case UMOD:
4034 if (arg1 == 0
4035 || ((unsigned HOST_WIDE_INT) arg0s
4036 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4037 && arg1s == -1))
4038 return 0;
4039 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4040 break;
4042 case AND:
4043 val = arg0 & arg1;
4044 break;
4046 case IOR:
4047 val = arg0 | arg1;
4048 break;
4050 case XOR:
4051 val = arg0 ^ arg1;
4052 break;
4054 case LSHIFTRT:
4055 case ASHIFT:
4056 case ASHIFTRT:
4057 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4058 the value is in range. We can't return any old value for
4059 out-of-range arguments because either the middle-end (via
4060 shift_truncation_mask) or the back-end might be relying on
4061 target-specific knowledge. Nor can we rely on
4062 shift_truncation_mask, since the shift might not be part of an
4063 ashlM3, lshrM3 or ashrM3 instruction. */
4064 if (SHIFT_COUNT_TRUNCATED)
4065 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4066 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4067 return 0;
4069 val = (code == ASHIFT
4070 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4071 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4073 /* Sign-extend the result for arithmetic right shifts. */
4074 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4075 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4076 break;
4078 case ROTATERT:
4079 if (arg1 < 0)
4080 return 0;
4082 arg1 %= width;
4083 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4084 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4085 break;
4087 case ROTATE:
4088 if (arg1 < 0)
4089 return 0;
4091 arg1 %= width;
4092 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4093 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4094 break;
4096 case COMPARE:
4097 /* Do nothing here. */
4098 return 0;
4100 case SMIN:
4101 val = arg0s <= arg1s ? arg0s : arg1s;
4102 break;
4104 case UMIN:
4105 val = ((unsigned HOST_WIDE_INT) arg0
4106 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4107 break;
4109 case SMAX:
4110 val = arg0s > arg1s ? arg0s : arg1s;
4111 break;
4113 case UMAX:
4114 val = ((unsigned HOST_WIDE_INT) arg0
4115 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4116 break;
4118 case SS_PLUS:
4119 case US_PLUS:
4120 case SS_MINUS:
4121 case US_MINUS:
4122 case SS_MULT:
4123 case US_MULT:
4124 case SS_DIV:
4125 case US_DIV:
4126 case SS_ASHIFT:
4127 case US_ASHIFT:
4128 /* ??? There are simplifications that can be done. */
4129 return 0;
4131 default:
4132 gcc_unreachable ();
4135 return gen_int_mode (val, mode);
4138 return NULL_RTX;
4143 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4144 PLUS or MINUS.
4146 Rather than test for specific cases, we do this by a brute-force method
4147 and do all possible simplifications until no more changes occur. Then
4148 we rebuild the operation. */
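/* For example, given (minus (plus A B) (neg C)), the expansion loop below
   produces the operand list {A, +}, {B, +}, {C, +}; pairs that
   simplify_binary_operation can combine are merged, and the surviving
   operands are rebuilt into a chain of PLUS/MINUS expressions.  */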
4150 struct simplify_plus_minus_op_data
4152 rtx op;
4153 short neg;
4156 static bool
4157 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4159 int result;
4161 result = (commutative_operand_precedence (y)
4162 - commutative_operand_precedence (x));
4163 if (result)
4164 return result > 0;
4166 /* Group together equal REGs to do more simplification. */
4167 if (REG_P (x) && REG_P (y))
4168 return REGNO (x) > REGNO (y);
4169 else
4170 return false;
4173 static rtx
4174 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4175 rtx op1)
4177 struct simplify_plus_minus_op_data ops[8];
4178 rtx result, tem;
4179 int n_ops = 2, input_ops = 2;
4180 int changed, n_constants = 0, canonicalized = 0;
4181 int i, j;
4183 memset (ops, 0, sizeof ops);
4185 /* Set up the two operands and then expand them until nothing has been
4186 changed. If we run out of room in our array, give up; this should
4187 almost never happen. */
4189 ops[0].op = op0;
4190 ops[0].neg = 0;
4191 ops[1].op = op1;
4192 ops[1].neg = (code == MINUS);
4196 changed = 0;
4198 for (i = 0; i < n_ops; i++)
4200 rtx this_op = ops[i].op;
4201 int this_neg = ops[i].neg;
4202 enum rtx_code this_code = GET_CODE (this_op);
4204 switch (this_code)
4206 case PLUS:
4207 case MINUS:
4208 if (n_ops == 7)
4209 return NULL_RTX;
4211 ops[n_ops].op = XEXP (this_op, 1);
4212 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4213 n_ops++;
4215 ops[i].op = XEXP (this_op, 0);
4216 input_ops++;
4217 changed = 1;
4218 canonicalized |= this_neg;
4219 break;
4221 case NEG:
4222 ops[i].op = XEXP (this_op, 0);
4223 ops[i].neg = ! this_neg;
4224 changed = 1;
4225 canonicalized = 1;
4226 break;
4228 case CONST:
4229 if (n_ops < 7
4230 && GET_CODE (XEXP (this_op, 0)) == PLUS
4231 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4232 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4234 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4235 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4236 ops[n_ops].neg = this_neg;
4237 n_ops++;
4238 changed = 1;
4239 canonicalized = 1;
4241 break;
4243 case NOT:
4244 /* ~a -> (-a - 1) */
4245 if (n_ops != 7)
4247 ops[n_ops].op = CONSTM1_RTX (mode);
4248 ops[n_ops++].neg = this_neg;
4249 ops[i].op = XEXP (this_op, 0);
4250 ops[i].neg = !this_neg;
4251 changed = 1;
4252 canonicalized = 1;
4254 break;
4256 case CONST_INT:
4257 n_constants++;
4258 if (this_neg)
4260 ops[i].op = neg_const_int (mode, this_op);
4261 ops[i].neg = 0;
4262 changed = 1;
4263 canonicalized = 1;
4265 break;
4267 default:
4268 break;
4272 while (changed);
4274 if (n_constants > 1)
4275 canonicalized = 1;
4277 gcc_assert (n_ops >= 2);
4279 /* If we only have two operands, we can avoid the loops. */
4280 if (n_ops == 2)
4282 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4283 rtx lhs, rhs;
4285 /* Get the two operands. Be careful with the order, especially for
4286 the cases where code == MINUS. */
4287 if (ops[0].neg && ops[1].neg)
4289 lhs = gen_rtx_NEG (mode, ops[0].op);
4290 rhs = ops[1].op;
4292 else if (ops[0].neg)
4294 lhs = ops[1].op;
4295 rhs = ops[0].op;
4297 else
4299 lhs = ops[0].op;
4300 rhs = ops[1].op;
4303 return simplify_const_binary_operation (code, mode, lhs, rhs);
4306 /* Now simplify each pair of operands until nothing changes. */
4309 /* Insertion sort is good enough for an eight-element array. */
4310 for (i = 1; i < n_ops; i++)
4312 struct simplify_plus_minus_op_data save;
4313 j = i - 1;
4314 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4315 continue;
4317 canonicalized = 1;
4318 save = ops[i];
4320 ops[j + 1] = ops[j];
4321 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4322 ops[j + 1] = save;
4325 changed = 0;
4326 for (i = n_ops - 1; i > 0; i--)
4327 for (j = i - 1; j >= 0; j--)
4329 rtx lhs = ops[j].op, rhs = ops[i].op;
4330 int lneg = ops[j].neg, rneg = ops[i].neg;
4332 if (lhs != 0 && rhs != 0)
4334 enum rtx_code ncode = PLUS;
4336 if (lneg != rneg)
4338 ncode = MINUS;
4339 if (lneg)
4340 tem = lhs, lhs = rhs, rhs = tem;
4342 else if (swap_commutative_operands_p (lhs, rhs))
4343 tem = lhs, lhs = rhs, rhs = tem;
4345 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4346 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4348 rtx tem_lhs, tem_rhs;
4350 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4351 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4352 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4354 if (tem && !CONSTANT_P (tem))
4355 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4357 else
4358 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4360 /* Reject "simplifications" that just wrap the two
4361 arguments in a CONST. Failure to do so can result
4362 in infinite recursion with simplify_binary_operation
4363 when it calls us to simplify CONST operations. */
4364 if (tem
4365 && ! (GET_CODE (tem) == CONST
4366 && GET_CODE (XEXP (tem, 0)) == ncode
4367 && XEXP (XEXP (tem, 0), 0) == lhs
4368 && XEXP (XEXP (tem, 0), 1) == rhs))
4370 lneg &= rneg;
4371 if (GET_CODE (tem) == NEG)
4372 tem = XEXP (tem, 0), lneg = !lneg;
4373 if (CONST_INT_P (tem) && lneg)
4374 tem = neg_const_int (mode, tem), lneg = 0;
4376 ops[i].op = tem;
4377 ops[i].neg = lneg;
4378 ops[j].op = NULL_RTX;
4379 changed = 1;
4380 canonicalized = 1;
4385 /* If nothing changed, fail. */
4386 if (!canonicalized)
4387 return NULL_RTX;
4389 /* Pack all the operands to the lower-numbered entries. */
4390 for (i = 0, j = 0; j < n_ops; j++)
4391 if (ops[j].op)
4393 ops[i] = ops[j];
4394 i++;
4396 n_ops = i;
4398 while (changed);
4400 /* Create (minus -C X) instead of (neg (const (plus X C))). */
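/* Here ops[1].op already holds the negated constant -C and ops[0].op is X
   with its neg flag set, so -(X + C) is emitted as (-C) - X.  */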
4401 if (n_ops == 2
4402 && CONST_INT_P (ops[1].op)
4403 && CONSTANT_P (ops[0].op)
4404 && ops[0].neg)
4405 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4407 /* We suppressed creation of trivial CONST expressions in the
4408 combination loop to avoid recursion. Create one manually now.
4409 The combination loop should have ensured that there is exactly
4410 one CONST_INT, and the sort will have ensured that it is last
4411 in the array and that any other constant will be next-to-last. */
4413 if (n_ops > 1
4414 && CONST_INT_P (ops[n_ops - 1].op)
4415 && CONSTANT_P (ops[n_ops - 2].op))
4417 rtx value = ops[n_ops - 1].op;
4418 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4419 value = neg_const_int (mode, value);
4420 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4421 INTVAL (value));
4422 n_ops--;
4425 /* Put a non-negated operand first, if possible. */
4427 for (i = 0; i < n_ops && ops[i].neg; i++)
4428 continue;
4429 if (i == n_ops)
4430 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4431 else if (i != 0)
4433 tem = ops[0].op;
4434 ops[0] = ops[i];
4435 ops[i].op = tem;
4436 ops[i].neg = 1;
4439 /* Now make the result by performing the requested operations. */
4440 result = ops[0].op;
4441 for (i = 1; i < n_ops; i++)
4442 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4443 mode, result, ops[i].op);
4445 return result;
4448 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4449 static bool
4450 plus_minus_operand_p (const_rtx x)
4452 return GET_CODE (x) == PLUS
4453 || GET_CODE (x) == MINUS
4454 || (GET_CODE (x) == CONST
4455 && GET_CODE (XEXP (x, 0)) == PLUS
4456 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4457 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4460 /* Like simplify_binary_operation except used for relational operators.
4461 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4462 not both be VOIDmode.
4464 CMP_MODE specifies the mode in which the comparison is done, so it is
4465 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4466 the operands or, if both are VOIDmode, the operands are compared in
4467 "infinite precision". */
4469 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4470 enum machine_mode cmp_mode, rtx op0, rtx op1)
4472 rtx tem, trueop0, trueop1;
4474 if (cmp_mode == VOIDmode)
4475 cmp_mode = GET_MODE (op0);
4476 if (cmp_mode == VOIDmode)
4477 cmp_mode = GET_MODE (op1);
4479 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4480 if (tem)
4482 if (SCALAR_FLOAT_MODE_P (mode))
4484 if (tem == const0_rtx)
4485 return CONST0_RTX (mode);
4486 #ifdef FLOAT_STORE_FLAG_VALUE
4488 REAL_VALUE_TYPE val;
4489 val = FLOAT_STORE_FLAG_VALUE (mode);
4490 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4492 #else
4493 return NULL_RTX;
4494 #endif
4496 if (VECTOR_MODE_P (mode))
4498 if (tem == const0_rtx)
4499 return CONST0_RTX (mode);
4500 #ifdef VECTOR_STORE_FLAG_VALUE
4502 int i, units;
4503 rtvec v;
4505 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4506 if (val == NULL_RTX)
4507 return NULL_RTX;
4508 if (val == const1_rtx)
4509 return CONST1_RTX (mode);
4511 units = GET_MODE_NUNITS (mode);
4512 v = rtvec_alloc (units);
4513 for (i = 0; i < units; i++)
4514 RTVEC_ELT (v, i) = val;
4515 return gen_rtx_raw_CONST_VECTOR (mode, v);
4517 #else
4518 return NULL_RTX;
4519 #endif
4522 return tem;
4525 /* For the following tests, ensure const0_rtx is op1. */
4526 if (swap_commutative_operands_p (op0, op1)
4527 || (op0 == const0_rtx && op1 != const0_rtx))
4528 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4530 /* If op0 is a compare, extract the comparison arguments from it. */
4531 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4532 return simplify_gen_relational (code, mode, VOIDmode,
4533 XEXP (op0, 0), XEXP (op0, 1));
4535 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4536 || CC0_P (op0))
4537 return NULL_RTX;
4539 trueop0 = avoid_constant_pool_reference (op0);
4540 trueop1 = avoid_constant_pool_reference (op1);
4541 return simplify_relational_operation_1 (code, mode, cmp_mode,
4542 trueop0, trueop1);
4545 /* This part of simplify_relational_operation is only used when CMP_MODE
4546 is not in class MODE_CC (i.e. it is a real comparison).
4548 MODE is the mode of the result, while CMP_MODE specifies the mode in
4549 which the comparison is done, so it is the mode of the operands.
4551 static rtx
4552 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4553 enum machine_mode cmp_mode, rtx op0, rtx op1)
4555 enum rtx_code op0code = GET_CODE (op0);
4557 if (op1 == const0_rtx && COMPARISON_P (op0))
4559 /* If op0 is a comparison, extract the comparison arguments
4560 from it. */
4561 if (code == NE)
4563 if (GET_MODE (op0) == mode)
4564 return simplify_rtx (op0);
4565 else
4566 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4567 XEXP (op0, 0), XEXP (op0, 1));
4569 else if (code == EQ)
4571 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4572 if (new_code != UNKNOWN)
4573 return simplify_gen_relational (new_code, mode, VOIDmode,
4574 XEXP (op0, 0), XEXP (op0, 1));
4578 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4579 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
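/* The transformation holds because the unsigned test (a + C) < C succeeds
   exactly when a + C wraps around, which for nonzero C is equivalent to
   a >= -C in unsigned arithmetic.  */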
4580 if ((code == LTU || code == GEU)
4581 && GET_CODE (op0) == PLUS
4582 && CONST_INT_P (XEXP (op0, 1))
4583 && (rtx_equal_p (op1, XEXP (op0, 0))
4584 || rtx_equal_p (op1, XEXP (op0, 1)))
4585 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4586 && XEXP (op0, 1) != const0_rtx)
4588 rtx new_cmp
4589 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4590 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4591 cmp_mode, XEXP (op0, 0), new_cmp);
4594 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4595 if ((code == LTU || code == GEU)
4596 && GET_CODE (op0) == PLUS
4597 && rtx_equal_p (op1, XEXP (op0, 1))
4598 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4599 && !rtx_equal_p (op1, XEXP (op0, 0)))
4600 return simplify_gen_relational (code, mode, cmp_mode, op0,
4601 copy_rtx (XEXP (op0, 0)));
4603 if (op1 == const0_rtx)
4605 /* Canonicalize (GTU x 0) as (NE x 0). */
4606 if (code == GTU)
4607 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4608 /* Canonicalize (LEU x 0) as (EQ x 0). */
4609 if (code == LEU)
4610 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4612 else if (op1 == const1_rtx)
4614 switch (code)
4616 case GE:
4617 /* Canonicalize (GE x 1) as (GT x 0). */
4618 return simplify_gen_relational (GT, mode, cmp_mode,
4619 op0, const0_rtx);
4620 case GEU:
4621 /* Canonicalize (GEU x 1) as (NE x 0). */
4622 return simplify_gen_relational (NE, mode, cmp_mode,
4623 op0, const0_rtx);
4624 case LT:
4625 /* Canonicalize (LT x 1) as (LE x 0). */
4626 return simplify_gen_relational (LE, mode, cmp_mode,
4627 op0, const0_rtx);
4628 case LTU:
4629 /* Canonicalize (LTU x 1) as (EQ x 0). */
4630 return simplify_gen_relational (EQ, mode, cmp_mode,
4631 op0, const0_rtx);
4632 default:
4633 break;
4636 else if (op1 == constm1_rtx)
4638 /* Canonicalize (LE x -1) as (LT x 0). */
4639 if (code == LE)
4640 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4641 /* Canonicalize (GT x -1) as (GE x 0). */
4642 if (code == GT)
4643 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4646 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4647 if ((code == EQ || code == NE)
4648 && (op0code == PLUS || op0code == MINUS)
4649 && CONSTANT_P (op1)
4650 && CONSTANT_P (XEXP (op0, 1))
4651 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4653 rtx x = XEXP (op0, 0);
4654 rtx c = XEXP (op0, 1);
4655 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4656 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4658 /* Detect an infinitely recursive condition, where this simplification
4659 would oscillate between:
4660 A + B == C <---> C - B == A,
4661 where A, B, and C are all non-simplifiable constant expressions,
4662 usually SYMBOL_REFs. */
4663 if (GET_CODE (tem) == invcode
4664 && CONSTANT_P (x)
4665 && rtx_equal_p (c, XEXP (tem, 1)))
4666 return NULL_RTX;
4668 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4671 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4672 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4673 if (code == NE
4674 && op1 == const0_rtx
4675 && GET_MODE_CLASS (mode) == MODE_INT
4676 && cmp_mode != VOIDmode
4677 /* ??? Work-around BImode bugs in the ia64 backend. */
4678 && mode != BImode
4679 && cmp_mode != BImode
4680 && nonzero_bits (op0, cmp_mode) == 1
4681 && STORE_FLAG_VALUE == 1)
4682 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4683 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4684 : lowpart_subreg (mode, op0, cmp_mode);
4686 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4687 if ((code == EQ || code == NE)
4688 && op1 == const0_rtx
4689 && op0code == XOR)
4690 return simplify_gen_relational (code, mode, cmp_mode,
4691 XEXP (op0, 0), XEXP (op0, 1));
4693 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4694 if ((code == EQ || code == NE)
4695 && op0code == XOR
4696 && rtx_equal_p (XEXP (op0, 0), op1)
4697 && !side_effects_p (XEXP (op0, 0)))
4698 return simplify_gen_relational (code, mode, cmp_mode,
4699 XEXP (op0, 1), const0_rtx);
4701 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4702 if ((code == EQ || code == NE)
4703 && op0code == XOR
4704 && rtx_equal_p (XEXP (op0, 1), op1)
4705 && !side_effects_p (XEXP (op0, 1)))
4706 return simplify_gen_relational (code, mode, cmp_mode,
4707 XEXP (op0, 0), const0_rtx);
4709 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4710 if ((code == EQ || code == NE)
4711 && op0code == XOR
4712 && CONST_SCALAR_INT_P (op1)
4713 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4714 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4715 simplify_gen_binary (XOR, cmp_mode,
4716 XEXP (op0, 1), op1));
4718 if (op0code == POPCOUNT && op1 == const0_rtx)
4719 switch (code)
4721 case EQ:
4722 case LE:
4723 case LEU:
4724 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4725 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4726 XEXP (op0, 0), const0_rtx);
4728 case NE:
4729 case GT:
4730 case GTU:
4731 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4732 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4733 XEXP (op0, 0), const0_rtx);
4735 default:
4736 break;
4739 return NULL_RTX;
4742 enum
4744 CMP_EQ = 1,
4745 CMP_LT = 2,
4746 CMP_GT = 4,
4747 CMP_LTU = 8,
4748 CMP_GTU = 16
4752 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4753 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4754 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4755 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4756 For floating-point comparisons, assume that the operands were ordered. */
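/* For example, KNOWN_RESULT == (CMP_LT | CMP_LTU) means op0 is known to be
   less than op1 both signed and unsigned, so GE and GEU fold to const0_rtx
   while LT and LTU fold to const_true_rtx.  */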
4758 static rtx
4759 comparison_result (enum rtx_code code, int known_results)
4761 switch (code)
4763 case EQ:
4764 case UNEQ:
4765 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4766 case NE:
4767 case LTGT:
4768 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4770 case LT:
4771 case UNLT:
4772 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4773 case GE:
4774 case UNGE:
4775 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4777 case GT:
4778 case UNGT:
4779 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4780 case LE:
4781 case UNLE:
4782 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4784 case LTU:
4785 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4786 case GEU:
4787 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4789 case GTU:
4790 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4791 case LEU:
4792 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4794 case ORDERED:
4795 return const_true_rtx;
4796 case UNORDERED:
4797 return const0_rtx;
4798 default:
4799 gcc_unreachable ();
4803 /* Check if the given comparison (done in the given MODE) is actually a
4804 tautology or a contradiction.
4805 If no simplification is possible, this function returns zero.
4806 Otherwise, it returns either const_true_rtx or const0_rtx. */
4809 simplify_const_relational_operation (enum rtx_code code,
4810 enum machine_mode mode,
4811 rtx op0, rtx op1)
4813 rtx tem;
4814 rtx trueop0;
4815 rtx trueop1;
4817 gcc_assert (mode != VOIDmode
4818 || (GET_MODE (op0) == VOIDmode
4819 && GET_MODE (op1) == VOIDmode));
4821 /* If op0 is a compare, extract the comparison arguments from it. */
4822 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4824 op1 = XEXP (op0, 1);
4825 op0 = XEXP (op0, 0);
4827 if (GET_MODE (op0) != VOIDmode)
4828 mode = GET_MODE (op0);
4829 else if (GET_MODE (op1) != VOIDmode)
4830 mode = GET_MODE (op1);
4831 else
4832 return 0;
4835 /* We can't simplify MODE_CC values since we don't know what the
4836 actual comparison is. */
4837 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4838 return 0;
4840 /* Make sure the constant is second. */
4841 if (swap_commutative_operands_p (op0, op1))
4843 tem = op0, op0 = op1, op1 = tem;
4844 code = swap_condition (code);
4847 trueop0 = avoid_constant_pool_reference (op0);
4848 trueop1 = avoid_constant_pool_reference (op1);
4850 /* For integer comparisons of A and B maybe we can simplify A - B and can
4851 then simplify a comparison of that with zero. If A and B are both either
4852 a register or a CONST_INT, this can't help; testing for these cases will
4853 prevent infinite recursion here and speed things up.
4855 We can only do this for EQ and NE comparisons as otherwise we may
4856 lose or introduce overflow, which we cannot disregard as undefined,
4857 because we do not know the signedness of the operation on either the
4858 left or the right hand side of the comparison. */
4860 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4861 && (code == EQ || code == NE)
4862 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4863 && (REG_P (op1) || CONST_INT_P (trueop1)))
4864 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4865 /* We cannot do this if tem is a nonzero address. */
4866 && ! nonzero_address_p (tem))
4867 return simplify_const_relational_operation (signed_condition (code),
4868 mode, tem, const0_rtx);
4870 if (! HONOR_NANS (mode) && code == ORDERED)
4871 return const_true_rtx;
4873 if (! HONOR_NANS (mode) && code == UNORDERED)
4874 return const0_rtx;
4876 /* For modes without NaNs, if the two operands are equal, we know the
4877 result except if they have side-effects. Even with NaNs we know
4878 the result of unordered comparisons and, if signaling NaNs are
4879 irrelevant, also the result of LT/GT/LTGT. */
4880 if ((! HONOR_NANS (GET_MODE (trueop0))
4881 || code == UNEQ || code == UNLE || code == UNGE
4882 || ((code == LT || code == GT || code == LTGT)
4883 && ! HONOR_SNANS (GET_MODE (trueop0))))
4884 && rtx_equal_p (trueop0, trueop1)
4885 && ! side_effects_p (trueop0))
4886 return comparison_result (code, CMP_EQ);
4888 /* If the operands are floating-point constants, see if we can fold
4889 the result. */
4890 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4891 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4892 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4894 REAL_VALUE_TYPE d0, d1;
4896 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4897 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4899 /* Comparisons are unordered iff at least one of the values is NaN. */
4900 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4901 switch (code)
4903 case UNEQ:
4904 case UNLT:
4905 case UNGT:
4906 case UNLE:
4907 case UNGE:
4908 case NE:
4909 case UNORDERED:
4910 return const_true_rtx;
4911 case EQ:
4912 case LT:
4913 case GT:
4914 case LE:
4915 case GE:
4916 case LTGT:
4917 case ORDERED:
4918 return const0_rtx;
4919 default:
4920 return 0;
4923 return comparison_result (code,
4924 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4925 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4928 /* Otherwise, see if the operands are both integers. */
4929 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4930 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4931 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4933 int width = GET_MODE_PRECISION (mode);
4934 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4935 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4937 /* Get the two words comprising each integer constant. */
4938 if (CONST_DOUBLE_AS_INT_P (trueop0))
4940 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4941 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4943 else
4945 l0u = l0s = INTVAL (trueop0);
4946 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4949 if (CONST_DOUBLE_AS_INT_P (trueop1))
4951 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4952 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4954 else
4956 l1u = l1s = INTVAL (trueop1);
4957 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4960 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4961 we have to sign or zero-extend the values. */
4962 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4964 l0u &= GET_MODE_MASK (mode);
4965 l1u &= GET_MODE_MASK (mode);
4967 if (val_signbit_known_set_p (mode, l0s))
4968 l0s |= ~GET_MODE_MASK (mode);
4970 if (val_signbit_known_set_p (mode, l1s))
4971 l1s |= ~GET_MODE_MASK (mode);
4973 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4974 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4976 if (h0u == h1u && l0u == l1u)
4977 return comparison_result (code, CMP_EQ);
4978 else
4980 int cr;
4981 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4982 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4983 return comparison_result (code, cr);
4987 /* Optimize comparisons with upper and lower bounds. */
4988 if (HWI_COMPUTABLE_MODE_P (mode)
4989 && CONST_INT_P (trueop1))
4991 int sign;
4992 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4993 HOST_WIDE_INT val = INTVAL (trueop1);
4994 HOST_WIDE_INT mmin, mmax;
4996 if (code == GEU
4997 || code == LEU
4998 || code == GTU
4999 || code == LTU)
5000 sign = 0;
5001 else
5002 sign = 1;
5004 /* Get a reduced range if the sign bit is zero. */
5005 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5007 mmin = 0;
5008 mmax = nonzero;
5010 else
5012 rtx mmin_rtx, mmax_rtx;
5013 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5015 mmin = INTVAL (mmin_rtx);
5016 mmax = INTVAL (mmax_rtx);
5017 if (sign)
5019 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
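/* With SIGN_COPIES bits known to be copies of the sign bit, op0 fits in a
   proportionally narrower signed range, so shrink the bounds.  */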
5021 mmin >>= (sign_copies - 1);
5022 mmax >>= (sign_copies - 1);
5026 switch (code)
5028 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5029 case GEU:
5030 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5031 return const_true_rtx;
5032 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5033 return const0_rtx;
5034 break;
5035 case GE:
5036 if (val <= mmin)
5037 return const_true_rtx;
5038 if (val > mmax)
5039 return const0_rtx;
5040 break;
5042 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5043 case LEU:
5044 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5045 return const_true_rtx;
5046 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5047 return const0_rtx;
5048 break;
5049 case LE:
5050 if (val >= mmax)
5051 return const_true_rtx;
5052 if (val < mmin)
5053 return const0_rtx;
5054 break;
5056 case EQ:
5057 /* x == y is always false for y out of range. */
5058 if (val < mmin || val > mmax)
5059 return const0_rtx;
5060 break;
5062 /* x > y is always false for y >= mmax, always true for y < mmin. */
5063 case GTU:
5064 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5065 return const0_rtx;
5066 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5067 return const_true_rtx;
5068 break;
5069 case GT:
5070 if (val >= mmax)
5071 return const0_rtx;
5072 if (val < mmin)
5073 return const_true_rtx;
5074 break;
5076 /* x < y is always false for y <= mmin, always true for y > mmax. */
5077 case LTU:
5078 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5079 return const0_rtx;
5080 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5081 return const_true_rtx;
5082 break;
5083 case LT:
5084 if (val <= mmin)
5085 return const0_rtx;
5086 if (val > mmax)
5087 return const_true_rtx;
5088 break;
5090 case NE:
5091 /* x != y is always true for y out of range. */
5092 if (val < mmin || val > mmax)
5093 return const_true_rtx;
5094 break;
5096 default:
5097 break;
5101 /* Optimize integer comparisons with zero. */
5102 if (trueop1 == const0_rtx)
5104 /* Some addresses are known to be nonzero. We don't know
5105 their sign, but equality comparisons are known. */
5106 if (nonzero_address_p (trueop0))
5108 if (code == EQ || code == LEU)
5109 return const0_rtx;
5110 if (code == NE || code == GTU)
5111 return const_true_rtx;
5114 /* See if the first operand is an IOR with a constant. If so, we
5115 may be able to determine the result of this comparison. */
5116 if (GET_CODE (op0) == IOR)
5118 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5119 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5121 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5122 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5123 && (UINTVAL (inner_const)
5124 & ((unsigned HOST_WIDE_INT) 1
5125 << sign_bitnum)));
5127 switch (code)
5129 case EQ:
5130 case LEU:
5131 return const0_rtx;
5132 case NE:
5133 case GTU:
5134 return const_true_rtx;
5135 case LT:
5136 case LE:
5137 if (has_sign)
5138 return const_true_rtx;
5139 break;
5140 case GT:
5141 case GE:
5142 if (has_sign)
5143 return const0_rtx;
5144 break;
5145 default:
5146 break;
5152 /* Optimize comparison of ABS with zero. */
5153 if (trueop1 == CONST0_RTX (mode)
5154 && (GET_CODE (trueop0) == ABS
5155 || (GET_CODE (trueop0) == FLOAT_EXTEND
5156 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5158 switch (code)
5160 case LT:
5161 /* Optimize abs(x) < 0.0. */
5162 if (!HONOR_SNANS (mode)
5163 && (!INTEGRAL_MODE_P (mode)
5164 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5166 if (INTEGRAL_MODE_P (mode)
5167 && (issue_strict_overflow_warning
5168 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5169 warning (OPT_Wstrict_overflow,
5170 ("assuming signed overflow does not occur when "
5171 "assuming abs (x) < 0 is false"));
5172 return const0_rtx;
5174 break;
5176 case GE:
5177 /* Optimize abs(x) >= 0.0. */
5178 if (!HONOR_NANS (mode)
5179 && (!INTEGRAL_MODE_P (mode)
5180 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5182 if (INTEGRAL_MODE_P (mode)
5183 && (issue_strict_overflow_warning
5184 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5185 warning (OPT_Wstrict_overflow,
5186 ("assuming signed overflow does not occur when "
5187 "assuming abs (x) >= 0 is true"));
5188 return const_true_rtx;
5190 break;
5192 case UNGE:
5193 /* Optimize ! (abs(x) < 0.0). */
5194 return const_true_rtx;
5196 default:
5197 break;
5201 return 0;
5204 /* Simplify CODE, an operation with result mode MODE and three operands,
5205 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5206 a constant. Return 0 if no simplification is possible. */
5209 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5210 enum machine_mode op0_mode, rtx op0, rtx op1,
5211 rtx op2)
5213 unsigned int width = GET_MODE_PRECISION (mode);
5214 bool any_change = false;
5215 rtx tem;
5217 /* VOIDmode means "infinite" precision. */
5218 if (width == 0)
5219 width = HOST_BITS_PER_WIDE_INT;
5221 switch (code)
5223 case FMA:
5224 /* Simplify negations around the multiplication. */
5225 /* -a * -b + c => a * b + c. */
5226 if (GET_CODE (op0) == NEG)
5228 tem = simplify_unary_operation (NEG, mode, op1, mode);
5229 if (tem)
5230 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5232 else if (GET_CODE (op1) == NEG)
5234 tem = simplify_unary_operation (NEG, mode, op0, mode);
5235 if (tem)
5236 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5239 /* Canonicalize the two multiplication operands. */
5240 /* a * -b + c => -b * a + c. */
5241 if (swap_commutative_operands_p (op0, op1))
5242 tem = op0, op0 = op1, op1 = tem, any_change = true;
5244 if (any_change)
5245 return gen_rtx_FMA (mode, op0, op1, op2);
5246 return NULL_RTX;
5248 case SIGN_EXTRACT:
5249 case ZERO_EXTRACT:
5250 if (CONST_INT_P (op0)
5251 && CONST_INT_P (op1)
5252 && CONST_INT_P (op2)
5253 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5254 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5256 /* Extracting a bit-field from a constant */
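/* E.g. on a !BITS_BIG_ENDIAN target, (zero_extract:SI (const_int 0x1234)
   (const_int 4) (const_int 4)) selects bits 4..7 of 0x1234 and folds to
   (const_int 3).  */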
5257 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5258 HOST_WIDE_INT op1val = INTVAL (op1);
5259 HOST_WIDE_INT op2val = INTVAL (op2);
5260 if (BITS_BIG_ENDIAN)
5261 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5262 else
5263 val >>= op2val;
5265 if (HOST_BITS_PER_WIDE_INT != op1val)
5267 /* First zero-extend. */
5268 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5269 /* If desired, propagate sign bit. */
5270 if (code == SIGN_EXTRACT
5271 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5272 != 0)
5273 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5276 return gen_int_mode (val, mode);
5278 break;
5280 case IF_THEN_ELSE:
5281 if (CONST_INT_P (op0))
5282 return op0 != const0_rtx ? op1 : op2;
5284 /* Convert c ? a : a into "a". */
5285 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5286 return op1;
5288 /* Convert a != b ? a : b into "a". */
5289 if (GET_CODE (op0) == NE
5290 && ! side_effects_p (op0)
5291 && ! HONOR_NANS (mode)
5292 && ! HONOR_SIGNED_ZEROS (mode)
5293 && ((rtx_equal_p (XEXP (op0, 0), op1)
5294 && rtx_equal_p (XEXP (op0, 1), op2))
5295 || (rtx_equal_p (XEXP (op0, 0), op2)
5296 && rtx_equal_p (XEXP (op0, 1), op1))))
5297 return op1;
5299 /* Convert a == b ? a : b into "b". */
5300 if (GET_CODE (op0) == EQ
5301 && ! side_effects_p (op0)
5302 && ! HONOR_NANS (mode)
5303 && ! HONOR_SIGNED_ZEROS (mode)
5304 && ((rtx_equal_p (XEXP (op0, 0), op1)
5305 && rtx_equal_p (XEXP (op0, 1), op2))
5306 || (rtx_equal_p (XEXP (op0, 0), op2)
5307 && rtx_equal_p (XEXP (op0, 1), op1))))
5308 return op2;
5310 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5312 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5313 ? GET_MODE (XEXP (op0, 1))
5314 : GET_MODE (XEXP (op0, 0)));
5315 rtx temp;
5317 /* Look for happy constants in op1 and op2. */
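/* If the arms are STORE_FLAG_VALUE and zero, the IF_THEN_ELSE is just the
   comparison itself (or its reverse when the arms are swapped).  */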
5318 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5320 HOST_WIDE_INT t = INTVAL (op1);
5321 HOST_WIDE_INT f = INTVAL (op2);
5323 if (t == STORE_FLAG_VALUE && f == 0)
5324 code = GET_CODE (op0);
5325 else if (t == 0 && f == STORE_FLAG_VALUE)
5327 enum rtx_code tmp;
5328 tmp = reversed_comparison_code (op0, NULL_RTX);
5329 if (tmp == UNKNOWN)
5330 break;
5331 code = tmp;
5333 else
5334 break;
5336 return simplify_gen_relational (code, mode, cmp_mode,
5337 XEXP (op0, 0), XEXP (op0, 1));
5340 if (cmp_mode == VOIDmode)
5341 cmp_mode = op0_mode;
5342 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5343 cmp_mode, XEXP (op0, 0),
5344 XEXP (op0, 1));
5346 /* See if any simplifications were possible. */
5347 if (temp)
5349 if (CONST_INT_P (temp))
5350 return temp == const0_rtx ? op2 : op1;
5351 else if (temp)
5352 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5355 break;
5357 case VEC_MERGE:
5358 gcc_assert (GET_MODE (op0) == mode);
5359 gcc_assert (GET_MODE (op1) == mode);
5360 gcc_assert (VECTOR_MODE_P (mode));
5361 op2 = avoid_constant_pool_reference (op2);
5362 if (CONST_INT_P (op2))
5364 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5365 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5366 int mask = (1 << n_elts) - 1;
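/* OP2 acts as a bitmask: a set bit I selects element I from OP0, a clear
   bit selects it from OP1, so an all-zero mask yields OP1 and an all-ones
   mask yields OP0.  */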
5368 if (!(INTVAL (op2) & mask))
5369 return op1;
5370 if ((INTVAL (op2) & mask) == mask)
5371 return op0;
5373 op0 = avoid_constant_pool_reference (op0);
5374 op1 = avoid_constant_pool_reference (op1);
5375 if (GET_CODE (op0) == CONST_VECTOR
5376 && GET_CODE (op1) == CONST_VECTOR)
5378 rtvec v = rtvec_alloc (n_elts);
5379 unsigned int i;
5381 for (i = 0; i < n_elts; i++)
5382 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5383 ? CONST_VECTOR_ELT (op0, i)
5384 : CONST_VECTOR_ELT (op1, i));
5385 return gen_rtx_CONST_VECTOR (mode, v);
5388 break;
5390 default:
5391 gcc_unreachable ();
5394 return 0;
5397 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5398 or CONST_VECTOR,
5399 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5401 Works by unpacking OP into a collection of 8-bit values
5402 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5403 and then repacking them again for OUTERMODE. */
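/* Roughly speaking, taking the low SImode word of a DImode constant amounts
   to unpacking eight bytes and repacking four of them, with the endianness
   corrections below deciding which four.  */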
5405 static rtx
5406 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5407 enum machine_mode innermode, unsigned int byte)
5409 /* We support up to 512-bit values (for V8DFmode). */
5410 enum {
5411 max_bitsize = 512,
5412 value_bit = 8,
5413 value_mask = (1 << value_bit) - 1
5415 unsigned char value[max_bitsize / value_bit];
5416 int value_start;
5417 int i;
5418 int elem;
5420 int num_elem;
5421 rtx * elems;
5422 int elem_bitsize;
5423 rtx result_s;
5424 rtvec result_v = NULL;
5425 enum mode_class outer_class;
5426 enum machine_mode outer_submode;
5428 /* Some ports misuse CCmode. */
5429 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5430 return op;
5432 /* We have no way to represent a complex constant at the rtl level. */
5433 if (COMPLEX_MODE_P (outermode))
5434 return NULL_RTX;
5436 /* Unpack the value. */
5438 if (GET_CODE (op) == CONST_VECTOR)
5440 num_elem = CONST_VECTOR_NUNITS (op);
5441 elems = &CONST_VECTOR_ELT (op, 0);
5442 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5444 else
5446 num_elem = 1;
5447 elems = &op;
5448 elem_bitsize = max_bitsize;
5450 /* If this asserts, it is too complicated; reducing value_bit may help. */
5451 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5452 /* I don't know how to handle endianness of sub-units. */
5453 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5455 for (elem = 0; elem < num_elem; elem++)
5457 unsigned char * vp;
5458 rtx el = elems[elem];
5460 /* Vectors are kept in target memory order. (This is probably
5461 a mistake.) */
5463 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5464 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5465 / BITS_PER_UNIT);
5466 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5467 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5468 unsigned bytele = (subword_byte % UNITS_PER_WORD
5469 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5470 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5473 switch (GET_CODE (el))
5475 case CONST_INT:
5476 for (i = 0;
5477 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5478 i += value_bit)
5479 *vp++ = INTVAL (el) >> i;
5480 /* CONST_INTs are always logically sign-extended. */
5481 for (; i < elem_bitsize; i += value_bit)
5482 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5483 break;
5485 case CONST_DOUBLE:
5486 if (GET_MODE (el) == VOIDmode)
5488 unsigned char extend = 0;
5489 /* If this triggers, someone should have generated a
5490 CONST_INT instead. */
5491 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5493 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5494 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5495 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5497 *vp++
5498 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5499 i += value_bit;
5502 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5503 extend = -1;
5504 for (; i < elem_bitsize; i += value_bit)
5505 *vp++ = extend;
5507 else
5509 long tmp[max_bitsize / 32];
5510 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5512 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5513 gcc_assert (bitsize <= elem_bitsize);
5514 gcc_assert (bitsize % value_bit == 0);
5516 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5517 GET_MODE (el));
5519 /* real_to_target produces its result in words affected by
5520 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5521 and use WORDS_BIG_ENDIAN instead; see the documentation
5522 of SUBREG in rtl.texi. */
5523 for (i = 0; i < bitsize; i += value_bit)
5525 int ibase;
5526 if (WORDS_BIG_ENDIAN)
5527 ibase = bitsize - 1 - i;
5528 else
5529 ibase = i;
5530 *vp++ = tmp[ibase / 32] >> i % 32;
5533 /* It shouldn't matter what's done here, so fill it with
5534 zero. */
5535 for (; i < elem_bitsize; i += value_bit)
5536 *vp++ = 0;
5538 break;
5540 case CONST_FIXED:
5541 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5543 for (i = 0; i < elem_bitsize; i += value_bit)
5544 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5546 else
5548 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5549 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5550 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5551 i += value_bit)
5552 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5553 >> (i - HOST_BITS_PER_WIDE_INT);
5554 for (; i < elem_bitsize; i += value_bit)
5555 *vp++ = 0;
5557 break;
5559 default:
5560 gcc_unreachable ();
5564 /* Now, pick the right byte to start with. */
5565 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5566 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5567 will already have offset 0. */
5568 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5570 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5571 - byte);
5572 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5573 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5574 byte = (subword_byte % UNITS_PER_WORD
5575 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5578 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5579 so if it's become negative it will instead be very large.) */
5580 gcc_assert (byte < GET_MODE_SIZE (innermode));
5582 /* Convert from bytes to chunks of size value_bit. */
5583 value_start = byte * (BITS_PER_UNIT / value_bit);
5585 /* Re-pack the value. */
5587 if (VECTOR_MODE_P (outermode))
5589 num_elem = GET_MODE_NUNITS (outermode);
5590 result_v = rtvec_alloc (num_elem);
5591 elems = &RTVEC_ELT (result_v, 0);
5592 outer_submode = GET_MODE_INNER (outermode);
5594 else
5596 num_elem = 1;
5597 elems = &result_s;
5598 outer_submode = outermode;
5601 outer_class = GET_MODE_CLASS (outer_submode);
5602 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5604 gcc_assert (elem_bitsize % value_bit == 0);
5605 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5607 for (elem = 0; elem < num_elem; elem++)
5609 unsigned char *vp;
5611 /* Vectors are stored in target memory order. (This is probably
5612 a mistake.) */
5614 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5615 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5616 / BITS_PER_UNIT);
5617 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5618 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5619 unsigned bytele = (subword_byte % UNITS_PER_WORD
5620 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5621 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5624 switch (outer_class)
5626 case MODE_INT:
5627 case MODE_PARTIAL_INT:
5629 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5631 for (i = 0;
5632 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5633 i += value_bit)
5634 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5635 for (; i < elem_bitsize; i += value_bit)
5636 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5637 << (i - HOST_BITS_PER_WIDE_INT);
5639 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5640 know why. */
5641 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5642 elems[elem] = gen_int_mode (lo, outer_submode);
5643 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5644 elems[elem] = immed_double_const (lo, hi, outer_submode);
5645 else
5646 return NULL_RTX;
5648 break;
5650 case MODE_FLOAT:
5651 case MODE_DECIMAL_FLOAT:
5653 REAL_VALUE_TYPE r;
5654 long tmp[max_bitsize / 32];
5656 /* real_from_target wants its input in words affected by
5657 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5658 and use WORDS_BIG_ENDIAN instead; see the documentation
5659 of SUBREG in rtl.texi. */
5660 for (i = 0; i < max_bitsize / 32; i++)
5661 tmp[i] = 0;
5662 for (i = 0; i < elem_bitsize; i += value_bit)
5664 int ibase;
5665 if (WORDS_BIG_ENDIAN)
5666 ibase = elem_bitsize - 1 - i;
5667 else
5668 ibase = i;
5669 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5672 real_from_target (&r, tmp, outer_submode);
5673 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5675 break;
5677 case MODE_FRACT:
5678 case MODE_UFRACT:
5679 case MODE_ACCUM:
5680 case MODE_UACCUM:
5682 FIXED_VALUE_TYPE f;
5683 f.data.low = 0;
5684 f.data.high = 0;
5685 f.mode = outer_submode;
5687 for (i = 0;
5688 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5689 i += value_bit)
5690 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5691 for (; i < elem_bitsize; i += value_bit)
5692 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5693 << (i - HOST_BITS_PER_WIDE_INT));
5695 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5697 break;
5699 default:
5700 gcc_unreachable ();
5703 if (VECTOR_MODE_P (outermode))
5704 return gen_rtx_CONST_VECTOR (outermode, result_v);
5705 else
5706 return result_s;
5709 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5710 Return 0 if no simplifications are possible. */
5712 simplify_subreg (enum machine_mode outermode, rtx op,
5713 enum machine_mode innermode, unsigned int byte)
5715 /* Little bit of sanity checking. */
5716 gcc_assert (innermode != VOIDmode);
5717 gcc_assert (outermode != VOIDmode);
5718 gcc_assert (innermode != BLKmode);
5719 gcc_assert (outermode != BLKmode);
5721 gcc_assert (GET_MODE (op) == innermode
5722 || GET_MODE (op) == VOIDmode);
5724 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5725 return NULL_RTX;
5727 if (byte >= GET_MODE_SIZE (innermode))
5728 return NULL_RTX;
5730 if (outermode == innermode && !byte)
5731 return op;
5733 if (CONST_SCALAR_INT_P (op)
5734 || CONST_DOUBLE_AS_FLOAT_P (op)
5735 || GET_CODE (op) == CONST_FIXED
5736 || GET_CODE (op) == CONST_VECTOR)
5737 return simplify_immed_subreg (outermode, op, innermode, byte);
5739 /* Changing mode twice with SUBREG => just change it once,
5740 or not at all if changing back to op's starting mode. */
5741 if (GET_CODE (op) == SUBREG)
5743 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5744 int final_offset = byte + SUBREG_BYTE (op);
5745 rtx newx;
5747 if (outermode == innermostmode
5748 && byte == 0 && SUBREG_BYTE (op) == 0)
5749 return SUBREG_REG (op);
5751 /* The SUBREG_BYTE represents the offset, as if the value were stored
5752 in memory. An irritating exception is the paradoxical subreg, where
5753 we define SUBREG_BYTE to be 0; on big-endian machines, this
5754 value would otherwise be negative. For a moment, undo this exception. */
5755 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5757 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5758 if (WORDS_BIG_ENDIAN)
5759 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5760 if (BYTES_BIG_ENDIAN)
5761 final_offset += difference % UNITS_PER_WORD;
5763 if (SUBREG_BYTE (op) == 0
5764 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5766 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5767 if (WORDS_BIG_ENDIAN)
5768 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5769 if (BYTES_BIG_ENDIAN)
5770 final_offset += difference % UNITS_PER_WORD;
5773 /* See whether resulting subreg will be paradoxical. */
5774 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5776 /* In nonparadoxical subregs we can't handle negative offsets. */
5777 if (final_offset < 0)
5778 return NULL_RTX;
5779 /* Bail out in case resulting subreg would be incorrect. */
5780 if (final_offset % GET_MODE_SIZE (outermode)
5781 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5782 return NULL_RTX;
5784 else
5786 int offset = 0;
5787 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5789 /* In a paradoxical subreg, see if we are still looking at the lower part.
5790 If so, our SUBREG_BYTE will be 0. */
5791 if (WORDS_BIG_ENDIAN)
5792 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5793 if (BYTES_BIG_ENDIAN)
5794 offset += difference % UNITS_PER_WORD;
5795 if (offset == final_offset)
5796 final_offset = 0;
5797 else
5798 return NULL_RTX;
5801 /* Recurse for further possible simplifications. */
5802 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5803 final_offset);
5804 if (newx)
5805 return newx;
5806 if (validate_subreg (outermode, innermostmode,
5807 SUBREG_REG (op), final_offset))
5809 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5810 if (SUBREG_PROMOTED_VAR_P (op)
5811 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5812 && GET_MODE_CLASS (outermode) == MODE_INT
5813 && IN_RANGE (GET_MODE_SIZE (outermode),
5814 GET_MODE_SIZE (innermode),
5815 GET_MODE_SIZE (innermostmode))
5816 && subreg_lowpart_p (newx))
5818 SUBREG_PROMOTED_VAR_P (newx) = 1;
5819 SUBREG_PROMOTED_UNSIGNED_SET
5820 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5822 return newx;
5824 return NULL_RTX;
5827 /* SUBREG of a hard register => just change the register number
5828 and/or mode. If the hard register is not valid in that mode,
5829 suppress this simplification. If the hard register is the stack,
5830 frame, or argument pointer, leave this as a SUBREG. */
5832 if (REG_P (op) && HARD_REGISTER_P (op))
5834 unsigned int regno, final_regno;
5836 regno = REGNO (op);
5837 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5838 if (HARD_REGISTER_NUM_P (final_regno))
5840 rtx x;
5841 int final_offset = byte;
5843 /* Adjust offset for paradoxical subregs. */
5844 if (byte == 0
5845 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5847 int difference = (GET_MODE_SIZE (innermode)
5848 - GET_MODE_SIZE (outermode));
5849 if (WORDS_BIG_ENDIAN)
5850 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5851 if (BYTES_BIG_ENDIAN)
5852 final_offset += difference % UNITS_PER_WORD;
5855 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5857 /* Propagate original regno. We don't have any way to specify
5858 the offset inside the original regno, so do so only for the lowpart.
5859 The information is used only by alias analysis, which cannot
5860 grok partial registers anyway. */
5862 if (subreg_lowpart_offset (outermode, innermode) == byte)
5863 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5864 return x;
5868 /* If we have a SUBREG of a register that we are replacing with a
5869 MEM, make a new MEM and try replacing the SUBREG with it. Don't
5870 do this if the MEM has a mode-dependent address or if we would be
5871 widening it. */
5873 if (MEM_P (op)
5874 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5875 /* Allow splitting of volatile memory references in case we don't
5876 have an instruction to move the whole thing. */
5877 && (! MEM_VOLATILE_P (op)
5878 || ! have_insn_for (SET, innermode))
5879 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5880 return adjust_address_nv (op, outermode, byte);
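/* For instance, assuming the address is not mode-dependent and an
   SImode access is representable, on a 64-bit target
      (subreg:SI (mem:DI (reg:DI 100)) 4)
   becomes (mem:SI (plus:DI (reg:DI 100) (const_int 4))); the subreg
   byte offset is simply folded into the address by adjust_address_nv.  */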
5882 /* Handle complex values represented as a CONCAT
5883 of the real and imaginary parts. */
5884 if (GET_CODE (op) == CONCAT)
5886 unsigned int part_size, final_offset;
5887 rtx part, res;
5889 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5890 if (byte < part_size)
5892 part = XEXP (op, 0);
5893 final_offset = byte;
5895 else
5897 part = XEXP (op, 1);
5898 final_offset = byte - part_size;
5901 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5902 return NULL_RTX;
5904 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5905 if (res)
5906 return res;
5907 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5908 return gen_rtx_SUBREG (outermode, part, final_offset);
5909 return NULL_RTX;
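/* A sketch of the CONCAT case above, assuming 4-byte SFmode: for the
   complex value (concat:SC (reg:SF 100) (reg:SF 101)), the subreg
   selecting the second half,
      (subreg:SF (concat:SC (reg:SF 100) (reg:SF 101)) 4),
   recurses into the imaginary part and simplifies to (reg:SF 101).  */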
5912 /* A SUBREG resulting from a zero extension may fold to zero if
5913 it extracts higher bits than the ZERO_EXTEND's source provides. */
5914 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5916 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5917 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5918 return CONST0_RTX (outermode);
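/* For example, with 32-bit SImode and 64-bit DImode on a little-endian
   target,
      (subreg:SI (zero_extend:DI (reg:SI 100)) 4)
   reads only bits that the extension filled with zeros, so it folds
   to (const_int 0).  */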
5921 if (SCALAR_INT_MODE_P (outermode)
5922 && SCALAR_INT_MODE_P (innermode)
5923 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5924 && byte == subreg_lowpart_offset (outermode, innermode))
5926 rtx tem = simplify_truncation (outermode, op, innermode);
5927 if (tem)
5928 return tem;
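/* E.g. a lowpart subreg of a wider scalar operation is treated as a
   truncation here; on a typical little-endian target
      (subreg:SI (zero_extend:DI (reg:SI 100)) 0)
   may be simplified by simplify_truncation straight back to
   (reg:SI 100).  */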
5931 return NULL_RTX;
5934 /* Make a SUBREG operation or equivalent if it folds. */
5936 rtx
5937 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5938 enum machine_mode innermode, unsigned int byte)
5940 rtx newx;
5942 newx = simplify_subreg (outermode, op, innermode, byte);
5943 if (newx)
5944 return newx;
5946 if (GET_CODE (op) == SUBREG
5947 || GET_CODE (op) == CONCAT
5948 || GET_MODE (op) == VOIDmode)
5949 return NULL_RTX;
5951 if (validate_subreg (outermode, innermode, op, byte))
5952 return gen_rtx_SUBREG (outermode, op, byte);
5954 return NULL_RTX;
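/* A hypothetical use of simplify_gen_subreg: requesting the SImode
   lowpart of DImode pseudo 100 on a typical little-endian target,
      simplify_gen_subreg (SImode, gen_rtx_REG (DImode, 100), DImode, 0),
   finds nothing to fold but passes validate_subreg, so it returns
      (subreg:SI (reg:DI 100) 0).  */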
5957 /* Simplify X, an rtx expression.
5959 Return the simplified expression or NULL if no simplifications
5960 were possible.
5962 This is the preferred entry point into the simplification routines;
5963 however, we still allow passes to call the more specific routines.
5965 Right now GCC has three (yes, three) major bodies of RTL simplification
5966 code that need to be unified.
5968 1. fold_rtx in cse.c. This code uses various CSE-specific
5969 information to aid in RTL simplification.
5971 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5972 it uses combine-specific information to aid in RTL
5973 simplification.
5975 3. The routines in this file.
5978 Long term we want to have only one body of simplification code; to
5979 get to that state I recommend the following steps:
5981 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5982 that do not depend on pass-specific state into these routines.
5984 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5985 use this routine whenever possible.
5987 3. Allow for pass-dependent state to be provided to these
5988 routines and add simplifications based on the pass-dependent
5989 state. Remove code from cse.c & combine.c that becomes
5990 redundant/dead.
5992 It will take time, but ultimately the compiler will be easier to
5993 maintain and improve. It's totally silly that when we add a
5994 simplification it needs to be added to 4 places (3 for RTL
5995 simplification and 1 for tree simplification). */
5997 rtx
5998 simplify_rtx (const_rtx x)
6000 const enum rtx_code code = GET_CODE (x);
6001 const enum machine_mode mode = GET_MODE (x);
6003 switch (GET_RTX_CLASS (code))
6005 case RTX_UNARY:
6006 return simplify_unary_operation (code, mode,
6007 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6008 case RTX_COMM_ARITH:
6009 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6010 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6012 /* Fall through.... */
6014 case RTX_BIN_ARITH:
6015 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6017 case RTX_TERNARY:
6018 case RTX_BITFIELD_OPS:
6019 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6020 XEXP (x, 0), XEXP (x, 1),
6021 XEXP (x, 2));
6023 case RTX_COMPARE:
6024 case RTX_COMM_COMPARE:
6025 return simplify_relational_operation (code, mode,
6026 ((GET_MODE (XEXP (x, 0))
6027 != VOIDmode)
6028 ? GET_MODE (XEXP (x, 0))
6029 : GET_MODE (XEXP (x, 1))),
6030 XEXP (x, 0),
6031 XEXP (x, 1));
6033 case RTX_EXTRA:
6034 if (code == SUBREG)
6035 return simplify_subreg (mode, SUBREG_REG (x),
6036 GET_MODE (SUBREG_REG (x)),
6037 SUBREG_BYTE (x));
6038 break;
6040 case RTX_OBJ:
6041 if (code == LO_SUM)
6043 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6044 if (GET_CODE (XEXP (x, 0)) == HIGH
6045 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6046 return XEXP (x, 1);
6048 break;
6050 default:
6051 break;
6053 return NULL;
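/* A hypothetical example of the dispatch above: for
      (plus:SI (const_int 4) (reg:SI 100))
   the RTX_COMM_ARITH case sees the operands in non-canonical order and
   returns the result of simplify_gen_binary with them swapped, i.e.
      (plus:SI (reg:SI 100) (const_int 4)).  */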