[official-gcc.git] / gcc / simplify-rtx.c
blob 791f91a34a87b5a0ed1a96e8ce421fe23dcf38aa
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
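/* As a minimal usage sketch (the variable names here are illustrative,
   not from this file): starting from a signed HOST_WIDE_INT VALUE,

     unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) value;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   leaves HIGH equal to -1 when the top bit of LOW is set and 0
   otherwise, i.e. the (low, high) pair now represents VALUE sign
   extended to double width.  */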
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
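/* For illustration (a sketch, assuming a target whose SImode is 32 bits
   wide):

     mode_signbit_p (SImode,
                     gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode))

   returns true, since only the most significant bit of the masked value
   is set, while any other single-bit constant makes it return false.  */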
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
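/* A usage sketch (REG0 stands for a caller-provided SImode REG rtx):

     rtx sum = simplify_gen_binary (PLUS, SImode, reg0, const0_rtx);

   The addition folds away and SUM is simply REG0; when no folding is
   possible, a fresh (plus ...) rtx is generated, with any constant
   operand placed second by the commutativity canonicalization above.  */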
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 X; if it returns non-NULL, replace X with its return value and simplify the
410 result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
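/* For instance (a sketch, with REG0 and REG1 standing for caller-provided
   SImode registers): replacing REG0 by const0_rtx in

     (plus:SI (reg:SI 0) (reg:SI 1))

   via simplify_replace_rtx yields just (reg:SI 1), because the rebuilt
   PLUS is immediately re-simplified.  */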
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
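/* As an example of the extension case handled above (a sketch, not tied
   to a particular target):

     (truncate:QI (zero_extend:SI (reg:QI 0)))

   folds straight back to (reg:QI 0), while

     (truncate:HI (zero_extend:SI (reg:QI 0)))

   becomes (zero_extend:HI (reg:QI 0)), because the requested mode is
   still wider than the original QImode value.  */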
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
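/* For example (a sketch): simplifying (neg:SI (neg:SI (reg:SI 0))) through
   simplify_unary_operation returns (reg:SI 0) via the non-constant rules
   below, while a constant operand such as (const_int 5) under NEG folds to
   (const_int -5) in simplify_const_unary_operation.  */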
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
862 if (STORE_FLAG_VALUE == -1
863 && GET_CODE (op) == ASHIFTRT
864 && CONST_INT_P (XEXP (op, 1))
865 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
866 return simplify_gen_relational (GE, mode, VOIDmode,
867 XEXP (op, 0), const0_rtx);
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && (GET_MODE_SIZE (GET_MODE (op))
873 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
874 && GET_CODE (SUBREG_REG (op)) == ASHIFT
875 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
877 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
878 rtx x;
880 x = gen_rtx_ROTATE (inner_mode,
881 simplify_gen_unary (NOT, inner_mode, const1_rtx,
882 inner_mode),
883 XEXP (SUBREG_REG (op), 1));
884 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
885 if (temp)
886 return temp;
889 /* Apply De Morgan's laws to reduce number of patterns for machines
890 with negating logical insns (and-not, nand, etc.). If result has
891 only one NOT, put it first, since that is how the patterns are
892 coded. */
894 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
896 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
897 enum machine_mode op_mode;
899 op_mode = GET_MODE (in1);
900 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
902 op_mode = GET_MODE (in2);
903 if (op_mode == VOIDmode)
904 op_mode = mode;
905 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
907 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
909 rtx tem = in2;
910 in2 = in1; in1 = tem;
913 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
914 mode, in1, in2);
916 break;
918 case NEG:
919 /* (neg (neg X)) == X. */
920 if (GET_CODE (op) == NEG)
921 return XEXP (op, 0);
923 /* (neg (plus X 1)) can become (not X). */
924 if (GET_CODE (op) == PLUS
925 && XEXP (op, 1) == const1_rtx)
926 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
928 /* Similarly, (neg (not X)) is (plus X 1). */
929 if (GET_CODE (op) == NOT)
930 return plus_constant (mode, XEXP (op, 0), 1);
932 /* (neg (minus X Y)) can become (minus Y X). This transformation
933 isn't safe for modes with signed zeros, since if X and Y are
934 both +0, (minus Y X) is the same as (minus X Y). If the
935 rounding mode is towards +infinity (or -infinity) then the two
936 expressions will be rounded differently. */
937 if (GET_CODE (op) == MINUS
938 && !HONOR_SIGNED_ZEROS (mode)
939 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
940 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
942 if (GET_CODE (op) == PLUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
946 /* (neg (plus A C)) is simplified to (minus -C A). */
947 if (CONST_SCALAR_INT_P (XEXP (op, 1))
948 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
950 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
951 if (temp)
952 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
955 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
956 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
960 /* (neg (mult A B)) becomes (mult A (neg B)).
961 This works even for floating-point values. */
962 if (GET_CODE (op) == MULT
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
965 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
966 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
969 /* NEG commutes with ASHIFT since it is multiplication. Only do
970 this if we can then eliminate the NEG (e.g., if the operand
971 is a constant). */
972 if (GET_CODE (op) == ASHIFT)
974 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
975 if (temp)
976 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
979 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
980 C is equal to the width of MODE minus 1. */
981 if (GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
984 return simplify_gen_binary (LSHIFTRT, mode,
985 XEXP (op, 0), XEXP (op, 1));
987 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
988 C is equal to the width of MODE minus 1. */
989 if (GET_CODE (op) == LSHIFTRT
990 && CONST_INT_P (XEXP (op, 1))
991 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
992 return simplify_gen_binary (ASHIFTRT, mode,
993 XEXP (op, 0), XEXP (op, 1));
995 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
996 if (GET_CODE (op) == XOR
997 && XEXP (op, 1) == const1_rtx
998 && nonzero_bits (XEXP (op, 0), mode) == 1)
999 return plus_constant (mode, XEXP (op, 0), -1);
1001 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1002 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1003 if (GET_CODE (op) == LT
1004 && XEXP (op, 1) == const0_rtx
1005 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1007 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1008 int isize = GET_MODE_PRECISION (inner);
1009 if (STORE_FLAG_VALUE == 1)
1011 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1019 else if (STORE_FLAG_VALUE == -1)
1021 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1022 GEN_INT (isize - 1));
1023 if (mode == inner)
1024 return temp;
1025 if (GET_MODE_PRECISION (mode) > isize)
1026 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1027 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1030 break;
1032 case TRUNCATE:
1033 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1034 with the umulXi3_highpart patterns. */
1035 if (GET_CODE (op) == LSHIFTRT
1036 && GET_CODE (XEXP (op, 0)) == MULT)
1037 break;
1039 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1041 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1043 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1044 if (temp)
1045 return temp;
1047 /* We can't handle truncation to a partial integer mode here
1048 because we don't know the real bitsize of the partial
1049 integer mode. */
1050 break;
1053 if (GET_MODE (op) != VOIDmode)
1055 temp = simplify_truncation (mode, op, GET_MODE (op));
1056 if (temp)
1057 return temp;
1060 /* If we know that the value is already truncated, we can
1061 replace the TRUNCATE with a SUBREG. */
1062 if (GET_MODE_NUNITS (mode) == 1
1063 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1064 || truncated_to_mode (mode, op)))
1066 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1067 if (temp)
1068 return temp;
1071 /* A truncate of a comparison can be replaced with a subreg if
1072 STORE_FLAG_VALUE permits. This is like the previous test,
1073 but it works even if the comparison is done in a mode larger
1074 than HOST_BITS_PER_WIDE_INT. */
1075 if (HWI_COMPUTABLE_MODE_P (mode)
1076 && COMPARISON_P (op)
1077 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1079 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1080 if (temp)
1081 return temp;
1084 /* A truncate of a memory is just loading the low part of the memory
1085 if we are not changing the meaning of the address. */
1086 if (GET_CODE (op) == MEM
1087 && !VECTOR_MODE_P (mode)
1088 && !MEM_VOLATILE_P (op)
1089 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1091 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1092 if (temp)
1093 return temp;
1096 break;
1098 case FLOAT_TRUNCATE:
1099 if (DECIMAL_FLOAT_MODE_P (mode))
1100 break;
1102 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1103 if (GET_CODE (op) == FLOAT_EXTEND
1104 && GET_MODE (XEXP (op, 0)) == mode)
1105 return XEXP (op, 0);
1107 /* (float_truncate:SF (float_truncate:DF foo:XF))
1108 = (float_truncate:SF foo:XF).
1109 This may eliminate double rounding, so it is unsafe.
1111 (float_truncate:SF (float_extend:XF foo:DF))
1112 = (float_truncate:SF foo:DF).
1114 (float_truncate:DF (float_extend:XF foo:SF))
1115 = (float_extend:DF foo:SF). */
1116 if ((GET_CODE (op) == FLOAT_TRUNCATE
1117 && flag_unsafe_math_optimizations)
1118 || GET_CODE (op) == FLOAT_EXTEND)
1119 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1120 0)))
1121 > GET_MODE_SIZE (mode)
1122 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1123 mode,
1124 XEXP (op, 0), mode);
1126 /* (float_truncate (float x)) is (float x) */
1127 if (GET_CODE (op) == FLOAT
1128 && (flag_unsafe_math_optimizations
1129 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1130 && ((unsigned)significand_size (GET_MODE (op))
1131 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1132 - num_sign_bit_copies (XEXP (op, 0),
1133 GET_MODE (XEXP (op, 0))))))))
1134 return simplify_gen_unary (FLOAT, mode,
1135 XEXP (op, 0),
1136 GET_MODE (XEXP (op, 0)));
1138 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1139 (OP:SF foo:SF) if OP is NEG or ABS. */
1140 if ((GET_CODE (op) == ABS
1141 || GET_CODE (op) == NEG)
1142 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1143 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1144 return simplify_gen_unary (GET_CODE (op), mode,
1145 XEXP (XEXP (op, 0), 0), mode);
1147 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1148 is (float_truncate:SF x). */
1149 if (GET_CODE (op) == SUBREG
1150 && subreg_lowpart_p (op)
1151 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1152 return SUBREG_REG (op);
1153 break;
1155 case FLOAT_EXTEND:
1156 if (DECIMAL_FLOAT_MODE_P (mode))
1157 break;
1159 /* (float_extend (float_extend x)) is (float_extend x)
1161 (float_extend (float x)) is (float x) assuming that double
1162 rounding can't happen. */
1164 if (GET_CODE (op) == FLOAT_EXTEND
1165 || (GET_CODE (op) == FLOAT
1166 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1167 && ((unsigned)significand_size (GET_MODE (op))
1168 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1169 - num_sign_bit_copies (XEXP (op, 0),
1170 GET_MODE (XEXP (op, 0)))))))
1171 return simplify_gen_unary (GET_CODE (op), mode,
1172 XEXP (op, 0),
1173 GET_MODE (XEXP (op, 0)));
1175 break;
1177 case ABS:
1178 /* (abs (neg <foo>)) -> (abs <foo>) */
1179 if (GET_CODE (op) == NEG)
1180 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1181 GET_MODE (XEXP (op, 0)));
1183 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1184 do nothing. */
1185 if (GET_MODE (op) == VOIDmode)
1186 break;
1188 /* If operand is something known to be positive, ignore the ABS. */
1189 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1190 || val_signbit_known_clear_p (GET_MODE (op),
1191 nonzero_bits (op, GET_MODE (op))))
1192 return op;
1194 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1195 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1196 return gen_rtx_NEG (mode, op);
1198 break;
1200 case FFS:
1201 /* (ffs (*_extend <X>)) = (ffs <X>) */
1202 if (GET_CODE (op) == SIGN_EXTEND
1203 || GET_CODE (op) == ZERO_EXTEND)
1204 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1206 break;
1208 case POPCOUNT:
1209 switch (GET_CODE (op))
1211 case BSWAP:
1212 case ZERO_EXTEND:
1213 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1214 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1215 GET_MODE (XEXP (op, 0)));
1217 case ROTATE:
1218 case ROTATERT:
1219 /* Rotations don't affect popcount. */
1220 if (!side_effects_p (XEXP (op, 1)))
1221 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1222 GET_MODE (XEXP (op, 0)));
1223 break;
1225 default:
1226 break;
1228 break;
1230 case PARITY:
1231 switch (GET_CODE (op))
1233 case NOT:
1234 case BSWAP:
1235 case ZERO_EXTEND:
1236 case SIGN_EXTEND:
1237 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1238 GET_MODE (XEXP (op, 0)));
1240 case ROTATE:
1241 case ROTATERT:
1242 /* Rotations don't affect parity. */
1243 if (!side_effects_p (XEXP (op, 1)))
1244 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1246 break;
1248 default:
1249 break;
1251 break;
1253 case BSWAP:
1254 /* (bswap (bswap x)) -> x. */
1255 if (GET_CODE (op) == BSWAP)
1256 return XEXP (op, 0);
1257 break;
1259 case FLOAT:
1260 /* (float (sign_extend <X>)) = (float <X>). */
1261 if (GET_CODE (op) == SIGN_EXTEND)
1262 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1263 GET_MODE (XEXP (op, 0)));
1264 break;
1266 case SIGN_EXTEND:
1267 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1268 becomes just the MINUS if its mode is MODE. This allows
1269 folding switch statements on machines using casesi (such as
1270 the VAX). */
1271 if (GET_CODE (op) == TRUNCATE
1272 && GET_MODE (XEXP (op, 0)) == mode
1273 && GET_CODE (XEXP (op, 0)) == MINUS
1274 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1275 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1276 return XEXP (op, 0);
1278 /* Extending a widening multiplication should be canonicalized to
1279 a wider widening multiplication. */
1280 if (GET_CODE (op) == MULT)
1282 rtx lhs = XEXP (op, 0);
1283 rtx rhs = XEXP (op, 1);
1284 enum rtx_code lcode = GET_CODE (lhs);
1285 enum rtx_code rcode = GET_CODE (rhs);
1287 /* Widening multiplies usually extend both operands, but sometimes
1288 they use a shift to extract a portion of a register. */
1289 if ((lcode == SIGN_EXTEND
1290 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1291 && (rcode == SIGN_EXTEND
1292 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1294 enum machine_mode lmode = GET_MODE (lhs);
1295 enum machine_mode rmode = GET_MODE (rhs);
1296 int bits;
1298 if (lcode == ASHIFTRT)
1299 /* Number of bits not shifted off the end. */
1300 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1301 else /* lcode == SIGN_EXTEND */
1302 /* Size of inner mode. */
1303 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1305 if (rcode == ASHIFTRT)
1306 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1307 else /* rcode == SIGN_EXTEND */
1308 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1310 /* We can only widen multiplies if the result is mathematically
1311 equivalent. I.e. if overflow was impossible. */
1312 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1313 return simplify_gen_binary
1314 (MULT, mode,
1315 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1316 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1320 /* Check for a sign extension of a subreg of a promoted
1321 variable, where the promotion is sign-extended, and the
1322 target mode is the same as the variable's promotion. */
1323 if (GET_CODE (op) == SUBREG
1324 && SUBREG_PROMOTED_VAR_P (op)
1325 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1326 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1328 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1329 if (temp)
1330 return temp;
1333 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1334 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1335 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1337 gcc_assert (GET_MODE_BITSIZE (mode)
1338 > GET_MODE_BITSIZE (GET_MODE (op)));
1339 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1343 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1344 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1345 GET_MODE_BITSIZE (N) - I bits.
1346 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1347 is similarly (zero_extend:M (subreg:O <X>)). */
1348 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1349 && GET_CODE (XEXP (op, 0)) == ASHIFT
1350 && CONST_INT_P (XEXP (op, 1))
1351 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1352 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1354 enum machine_mode tmode
1355 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1356 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1357 gcc_assert (GET_MODE_BITSIZE (mode)
1358 > GET_MODE_BITSIZE (GET_MODE (op)));
1359 if (tmode != BLKmode)
1361 rtx inner =
1362 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1363 if (inner)
1364 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1365 ? SIGN_EXTEND : ZERO_EXTEND,
1366 mode, inner, tmode);
1370 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1371 /* As we do not know which address space the pointer is referring to,
1372 we can do this only if the target does not support different pointer
1373 or address modes depending on the address space. */
1374 if (target_default_pointer_address_modes_p ()
1375 && ! POINTERS_EXTEND_UNSIGNED
1376 && mode == Pmode && GET_MODE (op) == ptr_mode
1377 && (CONSTANT_P (op)
1378 || (GET_CODE (op) == SUBREG
1379 && REG_P (SUBREG_REG (op))
1380 && REG_POINTER (SUBREG_REG (op))
1381 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1382 return convert_memory_address (Pmode, op);
1383 #endif
1384 break;
1386 case ZERO_EXTEND:
1387 /* Check for a zero extension of a subreg of a promoted
1388 variable, where the promotion is zero-extended, and the
1389 target mode is the same as the variable's promotion. */
1390 if (GET_CODE (op) == SUBREG
1391 && SUBREG_PROMOTED_VAR_P (op)
1392 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1393 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1395 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1396 if (temp)
1397 return temp;
1400 /* Extending a widening multiplication should be canonicalized to
1401 a wider widening multiplication. */
1402 if (GET_CODE (op) == MULT)
1404 rtx lhs = XEXP (op, 0);
1405 rtx rhs = XEXP (op, 1);
1406 enum rtx_code lcode = GET_CODE (lhs);
1407 enum rtx_code rcode = GET_CODE (rhs);
1409 /* Widening multiplies usually extend both operands, but sometimes
1410 they use a shift to extract a portion of a register. */
1411 if ((lcode == ZERO_EXTEND
1412 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1413 && (rcode == ZERO_EXTEND
1414 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1416 enum machine_mode lmode = GET_MODE (lhs);
1417 enum machine_mode rmode = GET_MODE (rhs);
1418 int bits;
1420 if (lcode == LSHIFTRT)
1421 /* Number of bits not shifted off the end. */
1422 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1423 else /* lcode == ZERO_EXTEND */
1424 /* Size of inner mode. */
1425 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1427 if (rcode == LSHIFTRT)
1428 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1429 else /* rcode == ZERO_EXTEND */
1430 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1432 /* We can only widen multiplies if the result is mathematically
1433 equivalent. I.e. if overflow was impossible. */
1434 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1435 return simplify_gen_binary
1436 (MULT, mode,
1437 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1438 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1442 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1443 if (GET_CODE (op) == ZERO_EXTEND)
1444 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1445 GET_MODE (XEXP (op, 0)));
1447 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1448 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1449 GET_MODE_BITSIZE (N) - I bits. */
1450 if (GET_CODE (op) == LSHIFTRT
1451 && GET_CODE (XEXP (op, 0)) == ASHIFT
1452 && CONST_INT_P (XEXP (op, 1))
1453 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1454 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1456 enum machine_mode tmode
1457 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1458 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1459 if (tmode != BLKmode)
1461 rtx inner =
1462 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1463 if (inner)
1464 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1468 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1469 /* As we do not know which address space the pointer is referring to,
1470 we can do this only if the target does not support different pointer
1471 or address modes depending on the address space. */
1472 if (target_default_pointer_address_modes_p ()
1473 && POINTERS_EXTEND_UNSIGNED > 0
1474 && mode == Pmode && GET_MODE (op) == ptr_mode
1475 && (CONSTANT_P (op)
1476 || (GET_CODE (op) == SUBREG
1477 && REG_P (SUBREG_REG (op))
1478 && REG_POINTER (SUBREG_REG (op))
1479 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1480 return convert_memory_address (Pmode, op);
1481 #endif
1482 break;
1484 default:
1485 break;
1488 return 0;
1491 /* Try to compute the value of a unary operation CODE whose output mode is to
1492 be MODE with input operand OP whose mode was originally OP_MODE.
1493 Return zero if the value cannot be computed. */
1495 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1496 rtx op, enum machine_mode op_mode)
1498 unsigned int width = GET_MODE_PRECISION (mode);
1499 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1501 if (code == VEC_DUPLICATE)
1503 gcc_assert (VECTOR_MODE_P (mode));
1504 if (GET_MODE (op) != VOIDmode)
1506 if (!VECTOR_MODE_P (GET_MODE (op)))
1507 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1508 else
1509 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1510 (GET_MODE (op)));
1512 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1513 || GET_CODE (op) == CONST_VECTOR)
1515 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1516 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1517 rtvec v = rtvec_alloc (n_elts);
1518 unsigned int i;
1520 if (GET_CODE (op) != CONST_VECTOR)
1521 for (i = 0; i < n_elts; i++)
1522 RTVEC_ELT (v, i) = op;
1523 else
1525 enum machine_mode inmode = GET_MODE (op);
1526 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1527 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1529 gcc_assert (in_n_elts < n_elts);
1530 gcc_assert ((n_elts % in_n_elts) == 0);
1531 for (i = 0; i < n_elts; i++)
1532 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1534 return gen_rtx_CONST_VECTOR (mode, v);
1538 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1540 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1541 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1542 enum machine_mode opmode = GET_MODE (op);
1543 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1544 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1545 rtvec v = rtvec_alloc (n_elts);
1546 unsigned int i;
1548 gcc_assert (op_n_elts == n_elts);
1549 for (i = 0; i < n_elts; i++)
1551 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1552 CONST_VECTOR_ELT (op, i),
1553 GET_MODE_INNER (opmode));
1554 if (!x)
1555 return 0;
1556 RTVEC_ELT (v, i) = x;
1558 return gen_rtx_CONST_VECTOR (mode, v);
1561 /* The order of these tests is critical so that, for example, we don't
1562 check the wrong mode (input vs. output) for a conversion operation,
1563 such as FIX. At some point, this should be simplified. */
1565 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1567 HOST_WIDE_INT hv, lv;
1568 REAL_VALUE_TYPE d;
1570 if (CONST_INT_P (op))
1571 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1572 else
1573 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1575 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1576 d = real_value_truncate (mode, d);
1577 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1579 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1581 HOST_WIDE_INT hv, lv;
1582 REAL_VALUE_TYPE d;
1584 if (CONST_INT_P (op))
1585 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1586 else
1587 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1589 if (op_mode == VOIDmode
1590 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1591 /* We should never get a negative number. */
1592 gcc_assert (hv >= 0);
1593 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1594 hv = 0, lv &= GET_MODE_MASK (op_mode);
1596 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1597 d = real_value_truncate (mode, d);
1598 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1601 if (CONST_INT_P (op)
1602 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1604 HOST_WIDE_INT arg0 = INTVAL (op);
1605 HOST_WIDE_INT val;
1607 switch (code)
1609 case NOT:
1610 val = ~ arg0;
1611 break;
1613 case NEG:
1614 val = - arg0;
1615 break;
1617 case ABS:
1618 val = (arg0 >= 0 ? arg0 : - arg0);
1619 break;
1621 case FFS:
1622 arg0 &= GET_MODE_MASK (mode);
1623 val = ffs_hwi (arg0);
1624 break;
1626 case CLZ:
1627 arg0 &= GET_MODE_MASK (mode);
1628 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1630 else
1631 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1632 break;
1634 case CLRSB:
1635 arg0 &= GET_MODE_MASK (mode);
1636 if (arg0 == 0)
1637 val = GET_MODE_PRECISION (mode) - 1;
1638 else if (arg0 >= 0)
1639 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1640 else if (arg0 < 0)
1641 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1642 break;
1644 case CTZ:
1645 arg0 &= GET_MODE_MASK (mode);
1646 if (arg0 == 0)
1648 /* Even if the value at zero is undefined, we have to come
1649 up with some replacement. Seems good enough. */
1650 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1651 val = GET_MODE_PRECISION (mode);
1653 else
1654 val = ctz_hwi (arg0);
1655 break;
1657 case POPCOUNT:
1658 arg0 &= GET_MODE_MASK (mode);
1659 val = 0;
1660 while (arg0)
1661 val++, arg0 &= arg0 - 1;
1662 break;
1664 case PARITY:
1665 arg0 &= GET_MODE_MASK (mode);
1666 val = 0;
1667 while (arg0)
1668 val++, arg0 &= arg0 - 1;
1669 val &= 1;
1670 break;
1672 case BSWAP:
1674 unsigned int s;
1676 val = 0;
1677 for (s = 0; s < width; s += 8)
1679 unsigned int d = width - s - 8;
1680 unsigned HOST_WIDE_INT byte;
1681 byte = (arg0 >> s) & 0xff;
1682 val |= byte << d;
1685 break;
1687 case TRUNCATE:
1688 val = arg0;
1689 break;
1691 case ZERO_EXTEND:
1692 /* When zero-extending a CONST_INT, we need to know its
1693 original mode. */
1694 gcc_assert (op_mode != VOIDmode);
1695 if (op_width == HOST_BITS_PER_WIDE_INT)
1697 /* If we were really extending the mode,
1698 we would have to distinguish between zero-extension
1699 and sign-extension. */
1700 gcc_assert (width == op_width);
1701 val = arg0;
1703 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1704 val = arg0 & GET_MODE_MASK (op_mode);
1705 else
1706 return 0;
1707 break;
1709 case SIGN_EXTEND:
1710 if (op_mode == VOIDmode)
1711 op_mode = mode;
1712 op_width = GET_MODE_PRECISION (op_mode);
1713 if (op_width == HOST_BITS_PER_WIDE_INT)
1715 /* If we were really extending the mode,
1716 we would have to distinguish between zero-extension
1717 and sign-extension. */
1718 gcc_assert (width == op_width);
1719 val = arg0;
1721 else if (op_width < HOST_BITS_PER_WIDE_INT)
1723 val = arg0 & GET_MODE_MASK (op_mode);
1724 if (val_signbit_known_set_p (op_mode, val))
1725 val |= ~GET_MODE_MASK (op_mode);
1727 else
1728 return 0;
1729 break;
1731 case SQRT:
1732 case FLOAT_EXTEND:
1733 case FLOAT_TRUNCATE:
1734 case SS_TRUNCATE:
1735 case US_TRUNCATE:
1736 case SS_NEG:
1737 case US_NEG:
1738 case SS_ABS:
1739 return 0;
1741 default:
1742 gcc_unreachable ();
1745 return gen_int_mode (val, mode);
1748 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1749 for a DImode operation on a CONST_INT. */
1750 else if (width <= HOST_BITS_PER_DOUBLE_INT
1751 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1753 double_int first, value;
1755 if (CONST_DOUBLE_AS_INT_P (op))
1756 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1757 CONST_DOUBLE_LOW (op));
1758 else
1759 first = double_int::from_shwi (INTVAL (op));
1761 switch (code)
1763 case NOT:
1764 value = ~first;
1765 break;
1767 case NEG:
1768 value = -first;
1769 break;
1771 case ABS:
1772 if (first.is_negative ())
1773 value = -first;
1774 else
1775 value = first;
1776 break;
1778 case FFS:
1779 value.high = 0;
1780 if (first.low != 0)
1781 value.low = ffs_hwi (first.low);
1782 else if (first.high != 0)
1783 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1784 else
1785 value.low = 0;
1786 break;
1788 case CLZ:
1789 value.high = 0;
1790 if (first.high != 0)
1791 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1792 - HOST_BITS_PER_WIDE_INT;
1793 else if (first.low != 0)
1794 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1795 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1796 value.low = GET_MODE_PRECISION (mode);
1797 break;
1799 case CTZ:
1800 value.high = 0;
1801 if (first.low != 0)
1802 value.low = ctz_hwi (first.low);
1803 else if (first.high != 0)
1804 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1805 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1806 value.low = GET_MODE_PRECISION (mode);
1807 break;
1809 case POPCOUNT:
1810 value = double_int_zero;
1811 while (first.low)
1813 value.low++;
1814 first.low &= first.low - 1;
1816 while (first.high)
1818 value.low++;
1819 first.high &= first.high - 1;
1821 break;
1823 case PARITY:
1824 value = double_int_zero;
1825 while (first.low)
1827 value.low++;
1828 first.low &= first.low - 1;
1830 while (first.high)
1832 value.low++;
1833 first.high &= first.high - 1;
1835 value.low &= 1;
1836 break;
1838 case BSWAP:
1840 unsigned int s;
1842 value = double_int_zero;
1843 for (s = 0; s < width; s += 8)
1845 unsigned int d = width - s - 8;
1846 unsigned HOST_WIDE_INT byte;
1848 if (s < HOST_BITS_PER_WIDE_INT)
1849 byte = (first.low >> s) & 0xff;
1850 else
1851 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1853 if (d < HOST_BITS_PER_WIDE_INT)
1854 value.low |= byte << d;
1855 else
1856 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1859 break;
1861 case TRUNCATE:
1862 /* This is just a change-of-mode, so do nothing. */
1863 value = first;
1864 break;
1866 case ZERO_EXTEND:
1867 gcc_assert (op_mode != VOIDmode);
1869 if (op_width > HOST_BITS_PER_WIDE_INT)
1870 return 0;
1872 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1873 break;
1875 case SIGN_EXTEND:
1876 if (op_mode == VOIDmode
1877 || op_width > HOST_BITS_PER_WIDE_INT)
1878 return 0;
1879 else
1881 value.low = first.low & GET_MODE_MASK (op_mode);
1882 if (val_signbit_known_set_p (op_mode, value.low))
1883 value.low |= ~GET_MODE_MASK (op_mode);
1885 value.high = HWI_SIGN_EXTEND (value.low);
1887 break;
1889 case SQRT:
1890 return 0;
1892 default:
1893 return 0;
1896 return immed_double_int_const (value, mode);
1899 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 && SCALAR_FLOAT_MODE_P (mode)
1901 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1903 REAL_VALUE_TYPE d, t;
1904 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1906 switch (code)
1908 case SQRT:
1909 if (HONOR_SNANS (mode) && real_isnan (&d))
1910 return 0;
1911 real_sqrt (&t, mode, &d);
1912 d = t;
1913 break;
1914 case ABS:
1915 d = real_value_abs (&d);
1916 break;
1917 case NEG:
1918 d = real_value_negate (&d);
1919 break;
1920 case FLOAT_TRUNCATE:
1921 d = real_value_truncate (mode, d);
1922 break;
1923 case FLOAT_EXTEND:
1924 /* All this does is change the mode, unless the mode
1925 class changes. */
1926 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1927 real_convert (&d, mode, &d);
1928 break;
1929 case FIX:
1930 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1931 break;
1932 case NOT:
1934 long tmp[4];
1935 int i;
1937 real_to_target (tmp, &d, GET_MODE (op));
1938 for (i = 0; i < 4; i++)
1939 tmp[i] = ~tmp[i];
1940 real_from_target (&d, tmp, mode);
1941 break;
1943 default:
1944 gcc_unreachable ();
1946 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1949 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1951 && GET_MODE_CLASS (mode) == MODE_INT
1952 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1954 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1955 operators are intentionally left unspecified (to ease implementation
1956 by target backends), for consistency, this routine implements the
1957 same semantics for constant folding as used by the middle-end. */
1959 /* This was formerly used only for non-IEEE float.
1960 eggert@twinsun.com says it is safe for IEEE also. */
1961 HOST_WIDE_INT xh, xl, th, tl;
1962 REAL_VALUE_TYPE x, t;
1963 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1964 switch (code)
1966 case FIX:
1967 if (REAL_VALUE_ISNAN (x))
1968 return const0_rtx;
1970 /* Test against the signed upper bound. */
1971 if (width > HOST_BITS_PER_WIDE_INT)
1973 th = ((unsigned HOST_WIDE_INT) 1
1974 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1975 tl = -1;
1977 else
1979 th = 0;
1980 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1982 real_from_integer (&t, VOIDmode, tl, th, 0);
1983 if (REAL_VALUES_LESS (t, x))
1985 xh = th;
1986 xl = tl;
1987 break;
1990 /* Test against the signed lower bound. */
1991 if (width > HOST_BITS_PER_WIDE_INT)
1993 th = (unsigned HOST_WIDE_INT) (-1)
1994 << (width - HOST_BITS_PER_WIDE_INT - 1);
1995 tl = 0;
1997 else
1999 th = -1;
2000 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2002 real_from_integer (&t, VOIDmode, tl, th, 0);
2003 if (REAL_VALUES_LESS (x, t))
2005 xh = th;
2006 xl = tl;
2007 break;
2009 REAL_VALUE_TO_INT (&xl, &xh, x);
2010 break;
2012 case UNSIGNED_FIX:
2013 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2014 return const0_rtx;
2016 /* Test against the unsigned upper bound. */
2017 if (width == HOST_BITS_PER_DOUBLE_INT)
2019 th = -1;
2020 tl = -1;
2022 else if (width >= HOST_BITS_PER_WIDE_INT)
2024 th = ((unsigned HOST_WIDE_INT) 1
2025 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2026 tl = -1;
2028 else
2030 th = 0;
2031 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2033 real_from_integer (&t, VOIDmode, tl, th, 1);
2034 if (REAL_VALUES_LESS (t, x))
2036 xh = th;
2037 xl = tl;
2038 break;
2041 REAL_VALUE_TO_INT (&xl, &xh, x);
2042 break;
2044 default:
2045 gcc_unreachable ();
2047 return immed_double_const (xl, xh, mode);
2050 return NULL_RTX;
2053 /* Subroutine of simplify_binary_operation to simplify a commutative,
2054 associative binary operation CODE with result mode MODE, operating
2055 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2056 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2057 canonicalization is possible. */
2059 static rtx
2060 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2061 rtx op0, rtx op1)
2063 rtx tem;
2065 /* Linearize the operator to the left. */
2066 if (GET_CODE (op1) == code)
2068 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2069 if (GET_CODE (op0) == code)
2071 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2072 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2075 /* "a op (b op c)" becomes "(b op c) op a". */
2076 if (! swap_commutative_operands_p (op1, op0))
2077 return simplify_gen_binary (code, mode, op1, op0);
2079 tem = op0;
2080 op0 = op1;
2081 op1 = tem;
2084 if (GET_CODE (op0) == code)
2086 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2087 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2089 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2090 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2093 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2094 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2095 if (tem != 0)
2096 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2098 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2099 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2100 if (tem != 0)
2101 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2104 return 0;
2108 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2109 and OP1. Return 0 if no simplification is possible.
2111 Don't use this for relational operations such as EQ or LT.
2112 Use simplify_relational_operation instead. */
2113 rtx
2114 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2115 rtx op0, rtx op1)
2117 rtx trueop0, trueop1;
2118 rtx tem;
2120 /* Relational operations don't work here. We must know the mode
2121 of the operands in order to do the comparison correctly.
2122 Assuming a full word can give incorrect results.
2123 Consider comparing 128 with -128 in QImode. */
2124 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2125 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2127 /* Make sure the constant is second. */
2128 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2129 && swap_commutative_operands_p (op0, op1))
2131 tem = op0, op0 = op1, op1 = tem;
2134 trueop0 = avoid_constant_pool_reference (op0);
2135 trueop1 = avoid_constant_pool_reference (op1);
2137 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2138 if (tem)
2139 return tem;
2140 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2143 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2144 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2145 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2146 actual constants. */
2148 static rtx
2149 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2150 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2152 rtx tem, reversed, opleft, opright;
2153 HOST_WIDE_INT val;
2154 unsigned int width = GET_MODE_PRECISION (mode);
2156 /* Even if we can't compute a constant result,
2157 there are some cases worth simplifying. */
2159 switch (code)
2161 case PLUS:
2162 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2163 when x is NaN, infinite, or finite and nonzero. They aren't
2164 when x is -0 and the rounding mode is not towards -infinity,
2165 since (-0) + 0 is then 0. */
2166 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2167 return op0;
2169 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2170 transformations are safe even for IEEE. */
2171 if (GET_CODE (op0) == NEG)
2172 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2173 else if (GET_CODE (op1) == NEG)
2174 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2176 /* (~a) + 1 -> -a */
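/* In two's complement, a + ~a == -1 for every a, so ~a + 1 == -a;
   e.g. with a == 5, ~a == -6 and -6 + 1 == -5. */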
2177 if (INTEGRAL_MODE_P (mode)
2178 && GET_CODE (op0) == NOT
2179 && trueop1 == const1_rtx)
2180 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2182 /* Handle both-operands-constant cases. We can only add
2183 CONST_INTs to constants since the sum of relocatable symbols
2184 can't be handled by most assemblers. Don't add CONST_INT
2185 to CONST_INT since overflow won't be computed properly if wider
2186 than HOST_BITS_PER_WIDE_INT. */
2188 if ((GET_CODE (op0) == CONST
2189 || GET_CODE (op0) == SYMBOL_REF
2190 || GET_CODE (op0) == LABEL_REF)
2191 && CONST_INT_P (op1))
2192 return plus_constant (mode, op0, INTVAL (op1));
2193 else if ((GET_CODE (op1) == CONST
2194 || GET_CODE (op1) == SYMBOL_REF
2195 || GET_CODE (op1) == LABEL_REF)
2196 && CONST_INT_P (op0))
2197 return plus_constant (mode, op1, INTVAL (op0));
2199 /* See if this is something like X * C - X or vice versa or
2200 if the multiplication is written as a shift. If so, we can
2201 distribute and make a new multiply, shift, or maybe just
2202 have X (if C is 2 in the example above). But don't make
2203 something more expensive than we had before. */
2205 if (SCALAR_INT_MODE_P (mode))
2207 double_int coeff0, coeff1;
2208 rtx lhs = op0, rhs = op1;
2210 coeff0 = double_int_one;
2211 coeff1 = double_int_one;
2213 if (GET_CODE (lhs) == NEG)
2215 coeff0 = double_int_minus_one;
2216 lhs = XEXP (lhs, 0);
2218 else if (GET_CODE (lhs) == MULT
2219 && CONST_INT_P (XEXP (lhs, 1)))
2221 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2222 lhs = XEXP (lhs, 0);
2224 else if (GET_CODE (lhs) == ASHIFT
2225 && CONST_INT_P (XEXP (lhs, 1))
2226 && INTVAL (XEXP (lhs, 1)) >= 0
2227 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2229 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2230 lhs = XEXP (lhs, 0);
2233 if (GET_CODE (rhs) == NEG)
2235 coeff1 = double_int_minus_one;
2236 rhs = XEXP (rhs, 0);
2238 else if (GET_CODE (rhs) == MULT
2239 && CONST_INT_P (XEXP (rhs, 1)))
2241 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2242 rhs = XEXP (rhs, 0);
2244 else if (GET_CODE (rhs) == ASHIFT
2245 && CONST_INT_P (XEXP (rhs, 1))
2246 && INTVAL (XEXP (rhs, 1)) >= 0
2247 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2249 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2250 rhs = XEXP (rhs, 0);
2253 if (rtx_equal_p (lhs, rhs))
2255 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2256 rtx coeff;
2257 double_int val;
2258 bool speed = optimize_function_for_speed_p (cfun);
2260 val = coeff0 + coeff1;
2261 coeff = immed_double_int_const (val, mode);
2263 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2264 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2265 ? tem : 0;
2269 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
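/* Adding the sign bit modulo 2^precision just flips it, since any carry
   out of the sign bit is discarded; e.g. in QImode,
   (plus (xor X 1) 0x80) becomes (xor X 0x81). */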
2270 if (CONST_SCALAR_INT_P (op1)
2271 && GET_CODE (op0) == XOR
2272 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2273 && mode_signbit_p (mode, op1))
2274 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2275 simplify_gen_binary (XOR, mode, op1,
2276 XEXP (op0, 1)));
2278 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2279 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 && GET_CODE (op0) == MULT
2281 && GET_CODE (XEXP (op0, 0)) == NEG)
2283 rtx in1, in2;
2285 in1 = XEXP (XEXP (op0, 0), 0);
2286 in2 = XEXP (op0, 1);
2287 return simplify_gen_binary (MINUS, mode, op1,
2288 simplify_gen_binary (MULT, mode,
2289 in1, in2));
2292 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2293 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2294 is 1. */
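/* For example, with STORE_FLAG_VALUE == -1, (lt A B) is -1 or 0, so
   (plus (lt A B) 1) is 0 or 1, which is exactly (neg (ge A B)). */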
2295 if (COMPARISON_P (op0)
2296 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2297 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2298 && (reversed = reversed_comparison (op0, mode)))
2299 return
2300 simplify_gen_unary (NEG, mode, reversed, mode);
2302 /* If one of the operands is a PLUS or a MINUS, see if we can
2303 simplify this by the associative law.
2304 Don't use the associative law for floating point.
2305 The inaccuracy makes it nonassociative,
2306 and subtle programs can break if operations are associated. */
2308 if (INTEGRAL_MODE_P (mode)
2309 && (plus_minus_operand_p (op0)
2310 || plus_minus_operand_p (op1))
2311 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2312 return tem;
2314 /* Reassociate floating point addition only when the user
2315 specifies associative math operations. */
2316 if (FLOAT_MODE_P (mode)
2317 && flag_associative_math)
2319 tem = simplify_associative_operation (code, mode, op0, op1);
2320 if (tem)
2321 return tem;
2323 break;
2325 case COMPARE:
2326 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2327 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2328 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2329 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2331 rtx xop00 = XEXP (op0, 0);
2332 rtx xop10 = XEXP (op1, 0);
2334 #ifdef HAVE_cc0
2335 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2336 #else
2337 if (REG_P (xop00) && REG_P (xop10)
2338 && GET_MODE (xop00) == GET_MODE (xop10)
2339 && REGNO (xop00) == REGNO (xop10)
2340 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2341 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2342 #endif
2343 return xop00;
2345 break;
2347 case MINUS:
2348 /* We can't assume x-x is 0 even with non-IEEE floating point,
2349 but since it is zero except in very strange circumstances, we
2350 will treat it as zero with -ffinite-math-only. */
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2354 return CONST0_RTX (mode);
2356 /* Change subtraction from zero into negation. (0 - x) is the
2357 same as -x when x is NaN, infinite, or finite and nonzero.
2358 But if the mode has signed zeros, and does not round towards
2359 -infinity, then 0 - 0 is 0, not -0. */
2360 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2361 return simplify_gen_unary (NEG, mode, op1, mode);
2363 /* (-1 - a) is ~a. */
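/* Another two's complement identity: a + ~a == -1, hence -1 - a == ~a;
   e.g. -1 - 5 == -6 == ~5. */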
2364 if (trueop0 == constm1_rtx)
2365 return simplify_gen_unary (NOT, mode, op1, mode);
2367 /* Subtracting 0 has no effect unless the mode has signed zeros
2368 and supports rounding towards -infinity. In such a case,
2369 0 - 0 is -0. */
2370 if (!(HONOR_SIGNED_ZEROS (mode)
2371 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2372 && trueop1 == CONST0_RTX (mode))
2373 return op0;
2375 /* See if this is something like X * C - X or vice versa or
2376 if the multiplication is written as a shift. If so, we can
2377 distribute and make a new multiply, shift, or maybe just
2378 have X (if C is 2 in the example above). But don't make
2379 something more expensive than we had before. */
2381 if (SCALAR_INT_MODE_P (mode))
2383 double_int coeff0, negcoeff1;
2384 rtx lhs = op0, rhs = op1;
2386 coeff0 = double_int_one;
2387 negcoeff1 = double_int_minus_one;
2389 if (GET_CODE (lhs) == NEG)
2391 coeff0 = double_int_minus_one;
2392 lhs = XEXP (lhs, 0);
2394 else if (GET_CODE (lhs) == MULT
2395 && CONST_INT_P (XEXP (lhs, 1)))
2397 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2398 lhs = XEXP (lhs, 0);
2400 else if (GET_CODE (lhs) == ASHIFT
2401 && CONST_INT_P (XEXP (lhs, 1))
2402 && INTVAL (XEXP (lhs, 1)) >= 0
2403 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2405 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2406 lhs = XEXP (lhs, 0);
2409 if (GET_CODE (rhs) == NEG)
2411 negcoeff1 = double_int_one;
2412 rhs = XEXP (rhs, 0);
2414 else if (GET_CODE (rhs) == MULT
2415 && CONST_INT_P (XEXP (rhs, 1)))
2417 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2418 rhs = XEXP (rhs, 0);
2420 else if (GET_CODE (rhs) == ASHIFT
2421 && CONST_INT_P (XEXP (rhs, 1))
2422 && INTVAL (XEXP (rhs, 1)) >= 0
2423 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2425 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2426 negcoeff1 = -negcoeff1;
2427 rhs = XEXP (rhs, 0);
2430 if (rtx_equal_p (lhs, rhs))
2432 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2433 rtx coeff;
2434 double_int val;
2435 bool speed = optimize_function_for_speed_p (cfun);
2437 val = coeff0 + negcoeff1;
2438 coeff = immed_double_int_const (val, mode);
2440 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2441 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2442 ? tem : 0;
2446 /* (a - (-b)) -> (a + b). True even for IEEE. */
2447 if (GET_CODE (op1) == NEG)
2448 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2450 /* (-x - c) may be simplified as (-c - x). */
2451 if (GET_CODE (op0) == NEG
2452 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2454 tem = simplify_unary_operation (NEG, mode, op1, mode);
2455 if (tem)
2456 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2459 /* Don't let a relocatable value get a negative coeff. */
2460 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2461 return simplify_gen_binary (PLUS, mode,
2462 op0,
2463 neg_const_int (mode, op1));
2465 /* (x - (x & y)) -> (x & ~y) */
2466 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2468 if (rtx_equal_p (op0, XEXP (op1, 0)))
2470 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2471 GET_MODE (XEXP (op1, 1)));
2472 return simplify_gen_binary (AND, mode, op0, tem);
2474 if (rtx_equal_p (op0, XEXP (op1, 1)))
2476 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2477 GET_MODE (XEXP (op1, 0)));
2478 return simplify_gen_binary (AND, mode, op0, tem);
2482 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2483 by reversing the comparison code if valid. */
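/* With STORE_FLAG_VALUE == 1 the comparison result is 0 or 1, so
   subtracting it from 1 is a logical negation; e.g.
   (minus 1 (eq A B)) is (ne A B). */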
2484 if (STORE_FLAG_VALUE == 1
2485 && trueop0 == const1_rtx
2486 && COMPARISON_P (op1)
2487 && (reversed = reversed_comparison (op1, mode)))
2488 return reversed;
2490 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2491 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2492 && GET_CODE (op1) == MULT
2493 && GET_CODE (XEXP (op1, 0)) == NEG)
2495 rtx in1, in2;
2497 in1 = XEXP (XEXP (op1, 0), 0);
2498 in2 = XEXP (op1, 1);
2499 return simplify_gen_binary (PLUS, mode,
2500 simplify_gen_binary (MULT, mode,
2501 in1, in2),
2502 op0);
2505 /* Canonicalize (minus (neg A) (mult B C)) to
2506 (minus (mult (neg B) C) A). */
2507 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2508 && GET_CODE (op1) == MULT
2509 && GET_CODE (op0) == NEG)
2511 rtx in1, in2;
2513 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2514 in2 = XEXP (op1, 1);
2515 return simplify_gen_binary (MINUS, mode,
2516 simplify_gen_binary (MULT, mode,
2517 in1, in2),
2518 XEXP (op0, 0));
2521 /* If one of the operands is a PLUS or a MINUS, see if we can
2522 simplify this by the associative law. This will, for example,
2523 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2524 Don't use the associative law for floating point.
2525 The inaccuracy makes it nonassociative,
2526 and subtle programs can break if operations are associated. */
2528 if (INTEGRAL_MODE_P (mode)
2529 && (plus_minus_operand_p (op0)
2530 || plus_minus_operand_p (op1))
2531 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2532 return tem;
2533 break;
2535 case MULT:
2536 if (trueop1 == constm1_rtx)
2537 return simplify_gen_unary (NEG, mode, op0, mode);
2539 if (GET_CODE (op0) == NEG)
2541 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2542 /* If op1 is a MULT as well and simplify_unary_operation
2543 just moved the NEG to the second operand, simplify_gen_binary
2544 below could, through simplify_associative_operation, move
2545 the NEG around again and recurse endlessly. */
2546 if (temp
2547 && GET_CODE (op1) == MULT
2548 && GET_CODE (temp) == MULT
2549 && XEXP (op1, 0) == XEXP (temp, 0)
2550 && GET_CODE (XEXP (temp, 1)) == NEG
2551 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2552 temp = NULL_RTX;
2553 if (temp)
2554 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2556 if (GET_CODE (op1) == NEG)
2558 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2559 /* If op0 is a MULT as well and simplify_unary_operation
2560 just moved the NEG to the second operand, simplify_gen_binary
2561 below could, through simplify_associative_operation, move
2562 the NEG around again and recurse endlessly. */
2563 if (temp
2564 && GET_CODE (op0) == MULT
2565 && GET_CODE (temp) == MULT
2566 && XEXP (op0, 0) == XEXP (temp, 0)
2567 && GET_CODE (XEXP (temp, 1)) == NEG
2568 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2569 temp = NULL_RTX;
2570 if (temp)
2571 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2575 x is NaN, since x * 0 is then also NaN. Nor is it valid
2576 when the mode has signed zeros, since multiplying a negative
2577 number by 0 will give -0, not 0. */
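/* E.g. with IEEE signed zeros, (-3.0) * 0.0 is -0.0 rather than 0.0,
   and NaN * 0.0 is NaN, so the fold is restricted accordingly. */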
2578 if (!HONOR_NANS (mode)
2579 && !HONOR_SIGNED_ZEROS (mode)
2580 && trueop1 == CONST0_RTX (mode)
2581 && ! side_effects_p (op0))
2582 return op1;
2584 /* In IEEE floating point, x*1 is not equivalent to x for
2585 signalling NaNs. */
2586 if (!HONOR_SNANS (mode)
2587 && trueop1 == CONST1_RTX (mode))
2588 return op0;
2590 /* Convert multiply by constant power of two into shift unless
2591 we are still generating RTL. This test is a kludge. */
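/* E.g. x * 8 becomes x << 3, since exact_log2 (8) == 3. */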
2592 if (CONST_INT_P (trueop1)
2593 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2594 /* If the mode is larger than the host word size, and the
2595 uppermost bit is set, then this isn't a power of two due
2596 to implicit sign extension. */
2597 && (width <= HOST_BITS_PER_WIDE_INT
2598 || val != HOST_BITS_PER_WIDE_INT - 1))
2599 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2601 /* Likewise for multipliers wider than a word. */
2602 if (CONST_DOUBLE_AS_INT_P (trueop1)
2603 && GET_MODE (op0) == mode
2604 && CONST_DOUBLE_LOW (trueop1) == 0
2605 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2606 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2607 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2608 return simplify_gen_binary (ASHIFT, mode, op0,
2609 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2611 /* x*2 is x+x and x*(-1) is -x */
2612 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2613 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2614 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2615 && GET_MODE (op0) == mode)
2617 REAL_VALUE_TYPE d;
2618 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2620 if (REAL_VALUES_EQUAL (d, dconst2))
2621 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2623 if (!HONOR_SNANS (mode)
2624 && REAL_VALUES_EQUAL (d, dconstm1))
2625 return simplify_gen_unary (NEG, mode, op0, mode);
2628 /* Optimize -x * -x as x * x. */
2629 if (FLOAT_MODE_P (mode)
2630 && GET_CODE (op0) == NEG
2631 && GET_CODE (op1) == NEG
2632 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 && !side_effects_p (XEXP (op0, 0)))
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2636 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2637 if (SCALAR_FLOAT_MODE_P (mode)
2638 && GET_CODE (op0) == ABS
2639 && GET_CODE (op1) == ABS
2640 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2641 && !side_effects_p (XEXP (op0, 0)))
2642 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2644 /* Reassociate multiplication, but for floating point MULTs
2645 only when the user specifies unsafe math optimizations. */
2646 if (! FLOAT_MODE_P (mode)
2647 || flag_unsafe_math_optimizations)
2649 tem = simplify_associative_operation (code, mode, op0, op1);
2650 if (tem)
2651 return tem;
2653 break;
2655 case IOR:
2656 if (trueop1 == CONST0_RTX (mode))
2657 return op0;
2658 if (INTEGRAL_MODE_P (mode)
2659 && trueop1 == CONSTM1_RTX (mode)
2660 && !side_effects_p (op0))
2661 return op1;
2662 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 return op0;
2664 /* A | (~A) -> -1 */
2665 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2666 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2667 && ! side_effects_p (op0)
2668 && SCALAR_INT_MODE_P (mode))
2669 return constm1_rtx;
2671 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
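/* E.g. if nonzero_bits (A) is 0x0f and C is 0xff, every bit that A could
   contribute is already set in C, so (ior A C) is just C. */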
2672 if (CONST_INT_P (op1)
2673 && HWI_COMPUTABLE_MODE_P (mode)
2674 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2675 && !side_effects_p (op0))
2676 return op1;
2678 /* Canonicalize (X & C1) | C2. */
2679 if (GET_CODE (op0) == AND
2680 && CONST_INT_P (trueop1)
2681 && CONST_INT_P (XEXP (op0, 1)))
2683 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2684 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2685 HOST_WIDE_INT c2 = INTVAL (trueop1);
2687 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2688 if ((c1 & c2) == c1
2689 && !side_effects_p (XEXP (op0, 0)))
2690 return trueop1;
2692 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2693 if (((c1|c2) & mask) == mask)
2694 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2696 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2697 if (((c1 & ~c2) & mask) != (c1 & mask))
2699 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2700 gen_int_mode (c1 & ~c2, mode));
2701 return simplify_gen_binary (IOR, mode, tem, op1);
2705 /* Convert (A & B) | A to A. */
2706 if (GET_CODE (op0) == AND
2707 && (rtx_equal_p (XEXP (op0, 0), op1)
2708 || rtx_equal_p (XEXP (op0, 1), op1))
2709 && ! side_effects_p (XEXP (op0, 0))
2710 && ! side_effects_p (XEXP (op0, 1)))
2711 return op1;
2713 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2714 mode size to (rotate A CX). */
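/* E.g. in SImode, (ior (ashift A 8) (lshiftrt A 24)) is (rotate A 8),
   because 8 + 24 == 32, the mode precision. */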
2716 if (GET_CODE (op1) == ASHIFT
2717 || GET_CODE (op1) == SUBREG)
2719 opleft = op1;
2720 opright = op0;
2722 else
2724 opright = op1;
2725 opleft = op0;
2728 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2729 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2730 && CONST_INT_P (XEXP (opleft, 1))
2731 && CONST_INT_P (XEXP (opright, 1))
2732 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2733 == GET_MODE_PRECISION (mode)))
2734 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2736 /* Same, but for ashift that has been "simplified" to a wider mode
2737 by simplify_shift_const. */
2739 if (GET_CODE (opleft) == SUBREG
2740 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2741 && GET_CODE (opright) == LSHIFTRT
2742 && GET_CODE (XEXP (opright, 0)) == SUBREG
2743 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2744 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2745 && (GET_MODE_SIZE (GET_MODE (opleft))
2746 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2747 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2748 SUBREG_REG (XEXP (opright, 0)))
2749 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2750 && CONST_INT_P (XEXP (opright, 1))
2751 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2752 == GET_MODE_PRECISION (mode)))
2753 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2754 XEXP (SUBREG_REG (opleft), 1));
2756 /* If we have (ior (and X C1) C2), simplify this by making
2757 C1 as small as possible if C1 actually changes. */
2758 if (CONST_INT_P (op1)
2759 && (HWI_COMPUTABLE_MODE_P (mode)
2760 || INTVAL (op1) > 0)
2761 && GET_CODE (op0) == AND
2762 && CONST_INT_P (XEXP (op0, 1))
2763 && CONST_INT_P (op1)
2764 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2765 return simplify_gen_binary (IOR, mode,
2766 simplify_gen_binary
2767 (AND, mode, XEXP (op0, 0),
2768 GEN_INT (UINTVAL (XEXP (op0, 1))
2769 & ~UINTVAL (op1))),
2770 op1);
2772 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2773 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2774 the PLUS does not affect any of the bits in OP1: then we can do
2775 the IOR as a PLUS and we can associate. This is valid if OP1
2776 can be safely shifted left C bits. */
2777 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2778 && GET_CODE (XEXP (op0, 0)) == PLUS
2779 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2780 && CONST_INT_P (XEXP (op0, 1))
2781 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2783 int count = INTVAL (XEXP (op0, 1));
2784 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2786 if (mask >> count == INTVAL (trueop1)
2787 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2788 return simplify_gen_binary (ASHIFTRT, mode,
2789 plus_constant (mode, XEXP (op0, 0),
2790 mask),
2791 XEXP (op0, 1));
2794 tem = simplify_associative_operation (code, mode, op0, op1);
2795 if (tem)
2796 return tem;
2797 break;
2799 case XOR:
2800 if (trueop1 == CONST0_RTX (mode))
2801 return op0;
2802 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2803 return simplify_gen_unary (NOT, mode, op0, mode);
2804 if (rtx_equal_p (trueop0, trueop1)
2805 && ! side_effects_p (op0)
2806 && GET_MODE_CLASS (mode) != MODE_CC)
2807 return CONST0_RTX (mode);
2809 /* Canonicalize XOR of the most significant bit to PLUS. */
2810 if (CONST_SCALAR_INT_P (op1)
2811 && mode_signbit_p (mode, op1))
2812 return simplify_gen_binary (PLUS, mode, op0, op1);
2813 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2814 if (CONST_SCALAR_INT_P (op1)
2815 && GET_CODE (op0) == PLUS
2816 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2817 && mode_signbit_p (mode, XEXP (op0, 1)))
2818 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2819 simplify_gen_binary (XOR, mode, op1,
2820 XEXP (op0, 1)));
2822 /* If we are XORing two things that have no bits in common,
2823 convert them into an IOR. This helps to detect rotation encoded
2824 using those methods and possibly other simplifications. */
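/* When the operands share no set bits there can be no carries, so XOR,
   IOR and PLUS all agree; e.g. 0xf0 ^ 0x0f == 0xf0 | 0x0f == 0xff. */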
2826 if (HWI_COMPUTABLE_MODE_P (mode)
2827 && (nonzero_bits (op0, mode)
2828 & nonzero_bits (op1, mode)) == 0)
2829 return (simplify_gen_binary (IOR, mode, op0, op1));
2831 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2832 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2833 (NOT y). */
2835 int num_negated = 0;
2837 if (GET_CODE (op0) == NOT)
2838 num_negated++, op0 = XEXP (op0, 0);
2839 if (GET_CODE (op1) == NOT)
2840 num_negated++, op1 = XEXP (op1, 0);
2842 if (num_negated == 2)
2843 return simplify_gen_binary (XOR, mode, op0, op1);
2844 else if (num_negated == 1)
2845 return simplify_gen_unary (NOT, mode,
2846 simplify_gen_binary (XOR, mode, op0, op1),
2847 mode);
2850 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2851 correspond to a machine insn or result in further simplifications
2852 if B is a constant. */
2854 if (GET_CODE (op0) == AND
2855 && rtx_equal_p (XEXP (op0, 1), op1)
2856 && ! side_effects_p (op1))
2857 return simplify_gen_binary (AND, mode,
2858 simplify_gen_unary (NOT, mode,
2859 XEXP (op0, 0), mode),
2860 op1);
2862 else if (GET_CODE (op0) == AND
2863 && rtx_equal_p (XEXP (op0, 0), op1)
2864 && ! side_effects_p (op1))
2865 return simplify_gen_binary (AND, mode,
2866 simplify_gen_unary (NOT, mode,
2867 XEXP (op0, 1), mode),
2868 op1);
2870 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2871 we can transform like this:
2872 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2873 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2874 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2875 Attempt a few simplifications when B and C are both constants. */
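/* A quick numeric check of the identity above, within a nibble: with
   A == 0xC, B == 0xA and C == 0x6, (A&B)^C == 0x8^0x6 == 0xE, and
   ~A&C | ~B&C | A&(~C&B) == 0x2 | 0x4 | 0x8 == 0xE. */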
2876 if (GET_CODE (op0) == AND
2877 && CONST_INT_P (op1)
2878 && CONST_INT_P (XEXP (op0, 1)))
2880 rtx a = XEXP (op0, 0);
2881 rtx b = XEXP (op0, 1);
2882 rtx c = op1;
2883 HOST_WIDE_INT bval = INTVAL (b);
2884 HOST_WIDE_INT cval = INTVAL (c);
2886 rtx na_c
2887 = simplify_binary_operation (AND, mode,
2888 simplify_gen_unary (NOT, mode, a, mode),
2889 c);
2890 if ((~cval & bval) == 0)
2892 /* Try to simplify ~A&C | ~B&C. */
2893 if (na_c != NULL_RTX)
2894 return simplify_gen_binary (IOR, mode, na_c,
2895 GEN_INT (~bval & cval));
2897 else
2899 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2900 if (na_c == const0_rtx)
2902 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2903 GEN_INT (~cval & bval));
2904 return simplify_gen_binary (IOR, mode, a_nc_b,
2905 GEN_INT (~bval & cval));
2910 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2911 comparison if STORE_FLAG_VALUE is 1. */
2912 if (STORE_FLAG_VALUE == 1
2913 && trueop1 == const1_rtx
2914 && COMPARISON_P (op0)
2915 && (reversed = reversed_comparison (op0, mode)))
2916 return reversed;
2918 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2919 is (lt foo (const_int 0)), so we can perform the above
2920 simplification if STORE_FLAG_VALUE is 1. */
2922 if (STORE_FLAG_VALUE == 1
2923 && trueop1 == const1_rtx
2924 && GET_CODE (op0) == LSHIFTRT
2925 && CONST_INT_P (XEXP (op0, 1))
2926 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2927 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2929 /* (xor (comparison foo bar) (const_int sign-bit))
2930 when STORE_FLAG_VALUE is the sign bit. */
2931 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2932 && trueop1 == const_true_rtx
2933 && COMPARISON_P (op0)
2934 && (reversed = reversed_comparison (op0, mode)))
2935 return reversed;
2937 tem = simplify_associative_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2940 break;
2942 case AND:
2943 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2944 return trueop1;
2945 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2946 return op0;
2947 if (HWI_COMPUTABLE_MODE_P (mode))
2949 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2950 HOST_WIDE_INT nzop1;
2951 if (CONST_INT_P (trueop1))
2953 HOST_WIDE_INT val1 = INTVAL (trueop1);
2954 /* If we are turning off bits already known off in OP0, we need
2955 not do an AND. */
2956 if ((nzop0 & ~val1) == 0)
2957 return op0;
2959 nzop1 = nonzero_bits (trueop1, mode);
2960 /* If we are clearing all the nonzero bits, the result is zero. */
2961 if ((nzop1 & nzop0) == 0
2962 && !side_effects_p (op0) && !side_effects_p (op1))
2963 return CONST0_RTX (mode);
2965 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2966 && GET_MODE_CLASS (mode) != MODE_CC)
2967 return op0;
2968 /* A & (~A) -> 0 */
2969 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2970 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2971 && ! side_effects_p (op0)
2972 && GET_MODE_CLASS (mode) != MODE_CC)
2973 return CONST0_RTX (mode);
2975 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2976 there are no nonzero bits of C outside of X's mode. */
2977 if ((GET_CODE (op0) == SIGN_EXTEND
2978 || GET_CODE (op0) == ZERO_EXTEND)
2979 && CONST_INT_P (trueop1)
2980 && HWI_COMPUTABLE_MODE_P (mode)
2981 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2982 & UINTVAL (trueop1)) == 0)
2984 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2985 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2986 gen_int_mode (INTVAL (trueop1),
2987 imode));
2988 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2991 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2992 we might be able to further simplify the AND with X and potentially
2993 remove the truncation altogether. */
2994 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2996 rtx x = XEXP (op0, 0);
2997 enum machine_mode xmode = GET_MODE (x);
2998 tem = simplify_gen_binary (AND, xmode, x,
2999 gen_int_mode (INTVAL (trueop1), xmode));
3000 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3003 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3004 if (GET_CODE (op0) == IOR
3005 && CONST_INT_P (trueop1)
3006 && CONST_INT_P (XEXP (op0, 1)))
3008 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3009 return simplify_gen_binary (IOR, mode,
3010 simplify_gen_binary (AND, mode,
3011 XEXP (op0, 0), op1),
3012 gen_int_mode (tmp, mode));
3015 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3016 insn (and may simplify more). */
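/* (A ^ B) & A keeps exactly the bits of A that are clear in B, i.e.
   A & ~B; e.g. A == 0xC, B == 0xA:
   (0xC ^ 0xA) & 0xC == 0x6 & 0xC == 0x4 == 0xC & ~0xA. */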
3017 if (GET_CODE (op0) == XOR
3018 && rtx_equal_p (XEXP (op0, 0), op1)
3019 && ! side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode,
3021 simplify_gen_unary (NOT, mode,
3022 XEXP (op0, 1), mode),
3023 op1);
3025 if (GET_CODE (op0) == XOR
3026 && rtx_equal_p (XEXP (op0, 1), op1)
3027 && ! side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode,
3029 simplify_gen_unary (NOT, mode,
3030 XEXP (op0, 0), mode),
3031 op1);
3033 /* Similarly for (~(A ^ B)) & A. */
3034 if (GET_CODE (op0) == NOT
3035 && GET_CODE (XEXP (op0, 0)) == XOR
3036 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3037 && ! side_effects_p (op1))
3038 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3040 if (GET_CODE (op0) == NOT
3041 && GET_CODE (XEXP (op0, 0)) == XOR
3042 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3043 && ! side_effects_p (op1))
3044 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3046 /* Convert (A | B) & A to A. */
3047 if (GET_CODE (op0) == IOR
3048 && (rtx_equal_p (XEXP (op0, 0), op1)
3049 || rtx_equal_p (XEXP (op0, 1), op1))
3050 && ! side_effects_p (XEXP (op0, 0))
3051 && ! side_effects_p (XEXP (op0, 1)))
3052 return op1;
3054 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3055 ((A & N) + B) & M -> (A + B) & M
3056 Similarly if (N & M) == 0,
3057 ((A | N) + B) & M -> (A + B) & M
3058 and for - instead of + and/or ^ instead of |.
3059 Also, if (N & M) == 0, then
3060 (A +- N) & M -> A & M. */
3061 if (CONST_INT_P (trueop1)
3062 && HWI_COMPUTABLE_MODE_P (mode)
3063 && ~UINTVAL (trueop1)
3064 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3065 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3067 rtx pmop[2];
3068 int which;
3070 pmop[0] = XEXP (op0, 0);
3071 pmop[1] = XEXP (op0, 1);
3073 if (CONST_INT_P (pmop[1])
3074 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3075 return simplify_gen_binary (AND, mode, pmop[0], op1);
3077 for (which = 0; which < 2; which++)
3079 tem = pmop[which];
3080 switch (GET_CODE (tem))
3082 case AND:
3083 if (CONST_INT_P (XEXP (tem, 1))
3084 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3085 == UINTVAL (trueop1))
3086 pmop[which] = XEXP (tem, 0);
3087 break;
3088 case IOR:
3089 case XOR:
3090 if (CONST_INT_P (XEXP (tem, 1))
3091 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3092 pmop[which] = XEXP (tem, 0);
3093 break;
3094 default:
3095 break;
3099 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3101 tem = simplify_gen_binary (GET_CODE (op0), mode,
3102 pmop[0], pmop[1]);
3103 return simplify_gen_binary (code, mode, tem, op1);
3107 /* (and X (ior (not X) Y)) -> (and X Y) */
3108 if (GET_CODE (op1) == IOR
3109 && GET_CODE (XEXP (op1, 0)) == NOT
3110 && op0 == XEXP (XEXP (op1, 0), 0))
3111 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3113 /* (and (ior (not X) Y) X) -> (and X Y) */
3114 if (GET_CODE (op0) == IOR
3115 && GET_CODE (XEXP (op0, 0)) == NOT
3116 && op1 == XEXP (XEXP (op0, 0), 0))
3117 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3119 tem = simplify_associative_operation (code, mode, op0, op1);
3120 if (tem)
3121 return tem;
3122 break;
3124 case UDIV:
3125 /* 0/x is 0 (or x&0 if x has side-effects). */
3126 if (trueop0 == CONST0_RTX (mode))
3128 if (side_effects_p (op1))
3129 return simplify_gen_binary (AND, mode, op1, trueop0);
3130 return trueop0;
3132 /* x/1 is x. */
3133 if (trueop1 == CONST1_RTX (mode))
3135 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 if (tem)
3137 return tem;
3139 /* Convert divide by power of two into shift. */
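/* E.g. for unsigned x, x / 8 == x >> 3. */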
3140 if (CONST_INT_P (trueop1)
3141 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3142 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3143 break;
3145 case DIV:
3146 /* Handle floating point and integers separately. */
3147 if (SCALAR_FLOAT_MODE_P (mode))
3149 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3150 safe for modes with NaNs, since 0.0 / 0.0 will then be
3151 NaN rather than 0.0. Nor is it safe for modes with signed
3152 zeros, since dividing 0 by a negative number gives -0.0 */
3153 if (trueop0 == CONST0_RTX (mode)
3154 && !HONOR_NANS (mode)
3155 && !HONOR_SIGNED_ZEROS (mode)
3156 && ! side_effects_p (op1))
3157 return op0;
3158 /* x/1.0 is x. */
3159 if (trueop1 == CONST1_RTX (mode)
3160 && !HONOR_SNANS (mode))
3161 return op0;
3163 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3164 && trueop1 != CONST0_RTX (mode))
3166 REAL_VALUE_TYPE d;
3167 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3169 /* x/-1.0 is -x. */
3170 if (REAL_VALUES_EQUAL (d, dconstm1)
3171 && !HONOR_SNANS (mode))
3172 return simplify_gen_unary (NEG, mode, op0, mode);
3174 /* Change FP division by a constant into multiplication.
3175 Only do this with -freciprocal-math. */
3176 if (flag_reciprocal_math
3177 && !REAL_VALUES_EQUAL (d, dconst0))
3179 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3180 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3181 return simplify_gen_binary (MULT, mode, op0, tem);
3185 else if (SCALAR_INT_MODE_P (mode))
3187 /* 0/x is 0 (or x&0 if x has side-effects). */
3188 if (trueop0 == CONST0_RTX (mode)
3189 && !cfun->can_throw_non_call_exceptions)
3191 if (side_effects_p (op1))
3192 return simplify_gen_binary (AND, mode, op1, trueop0);
3193 return trueop0;
3195 /* x/1 is x. */
3196 if (trueop1 == CONST1_RTX (mode))
3198 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3199 if (tem)
3200 return tem;
3202 /* x/-1 is -x. */
3203 if (trueop1 == constm1_rtx)
3205 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3206 if (x)
3207 return simplify_gen_unary (NEG, mode, x, mode);
3210 break;
3212 case UMOD:
3213 /* 0%x is 0 (or x&0 if x has side-effects). */
3214 if (trueop0 == CONST0_RTX (mode))
3216 if (side_effects_p (op1))
3217 return simplify_gen_binary (AND, mode, op1, trueop0);
3218 return trueop0;
3220 /* x%1 is 0 (or x&0 if x has side-effects). */
3221 if (trueop1 == CONST1_RTX (mode))
3223 if (side_effects_p (op0))
3224 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3225 return CONST0_RTX (mode);
3227 /* Implement modulus by power of two as AND. */
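/* E.g. for unsigned x, x % 8 == x & 7. */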
3228 if (CONST_INT_P (trueop1)
3229 && exact_log2 (UINTVAL (trueop1)) > 0)
3230 return simplify_gen_binary (AND, mode, op0,
3231 GEN_INT (INTVAL (op1) - 1));
3232 break;
3234 case MOD:
3235 /* 0%x is 0 (or x&0 if x has side-effects). */
3236 if (trueop0 == CONST0_RTX (mode))
3238 if (side_effects_p (op1))
3239 return simplify_gen_binary (AND, mode, op1, trueop0);
3240 return trueop0;
3242 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3243 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3245 if (side_effects_p (op0))
3246 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3247 return CONST0_RTX (mode);
3249 break;
3251 case ROTATERT:
3252 case ROTATE:
3253 case ASHIFTRT:
3254 if (trueop1 == CONST0_RTX (mode))
3255 return op0;
3256 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3257 return op0;
3258 /* Rotating ~0 always results in ~0. */
3259 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3260 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3261 && ! side_effects_p (op1))
3262 return op0;
3263 canonicalize_shift:
3264 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3266 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3267 if (val != INTVAL (op1))
3268 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3270 break;
3272 case ASHIFT:
3273 case SS_ASHIFT:
3274 case US_ASHIFT:
3275 if (trueop1 == CONST0_RTX (mode))
3276 return op0;
3277 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3278 return op0;
3279 goto canonicalize_shift;
3281 case LSHIFTRT:
3282 if (trueop1 == CONST0_RTX (mode))
3283 return op0;
3284 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3285 return op0;
3286 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
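/* clz of a nonzero value is strictly less than the precision, and equals
   it only for X == 0 when CLZ_DEFINED_VALUE_AT_ZERO says so; e.g. in
   SImode, (lshiftrt (clz X) 5) is 1 exactly when clz (X) == 32,
   i.e. when X == 0. */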
3287 if (GET_CODE (op0) == CLZ
3288 && CONST_INT_P (trueop1)
3289 && STORE_FLAG_VALUE == 1
3290 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3292 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3293 unsigned HOST_WIDE_INT zero_val = 0;
3295 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3296 && zero_val == GET_MODE_PRECISION (imode)
3297 && INTVAL (trueop1) == exact_log2 (zero_val))
3298 return simplify_gen_relational (EQ, mode, imode,
3299 XEXP (op0, 0), const0_rtx);
3301 goto canonicalize_shift;
3303 case SMIN:
3304 if (width <= HOST_BITS_PER_WIDE_INT
3305 && mode_signbit_p (mode, trueop1)
3306 && ! side_effects_p (op0))
3307 return op1;
3308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3309 return op0;
3310 tem = simplify_associative_operation (code, mode, op0, op1);
3311 if (tem)
3312 return tem;
3313 break;
3315 case SMAX:
3316 if (width <= HOST_BITS_PER_WIDE_INT
3317 && CONST_INT_P (trueop1)
3318 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3319 && ! side_effects_p (op0))
3320 return op1;
3321 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3322 return op0;
3323 tem = simplify_associative_operation (code, mode, op0, op1);
3324 if (tem)
3325 return tem;
3326 break;
3328 case UMIN:
3329 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3330 return op1;
3331 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3332 return op0;
3333 tem = simplify_associative_operation (code, mode, op0, op1);
3334 if (tem)
3335 return tem;
3336 break;
3338 case UMAX:
3339 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3340 return op1;
3341 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3342 return op0;
3343 tem = simplify_associative_operation (code, mode, op0, op1);
3344 if (tem)
3345 return tem;
3346 break;
3348 case SS_PLUS:
3349 case US_PLUS:
3350 case SS_MINUS:
3351 case US_MINUS:
3352 case SS_MULT:
3353 case US_MULT:
3354 case SS_DIV:
3355 case US_DIV:
3356 /* ??? There are simplifications that can be done. */
3357 return 0;
3359 case VEC_SELECT:
3360 if (!VECTOR_MODE_P (mode))
3362 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3363 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3364 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3365 gcc_assert (XVECLEN (trueop1, 0) == 1);
3366 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3368 if (GET_CODE (trueop0) == CONST_VECTOR)
3369 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3370 (trueop1, 0, 0)));
3372 /* Extract a scalar element from a nested VEC_SELECT expression
3373 (with optional nested VEC_CONCAT expression). Some targets
3374 (i386) extract a scalar element from a vector using a chain of
3375 nested VEC_SELECT expressions. When the input operand is a memory
3376 operand, this operation can be simplified to a simple scalar
3377 load from an offset memory address. */
3378 if (GET_CODE (trueop0) == VEC_SELECT)
3380 rtx op0 = XEXP (trueop0, 0);
3381 rtx op1 = XEXP (trueop0, 1);
3383 enum machine_mode opmode = GET_MODE (op0);
3384 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3385 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3387 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3388 int elem;
3390 rtvec vec;
3391 rtx tmp_op, tmp;
3393 gcc_assert (GET_CODE (op1) == PARALLEL);
3394 gcc_assert (i < n_elts);
3396 /* Select the element pointed to by the nested selector. */
3397 elem = INTVAL (XVECEXP (op1, 0, i));
3399 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3400 if (GET_CODE (op0) == VEC_CONCAT)
3402 rtx op00 = XEXP (op0, 0);
3403 rtx op01 = XEXP (op0, 1);
3405 enum machine_mode mode00, mode01;
3406 int n_elts00, n_elts01;
3408 mode00 = GET_MODE (op00);
3409 mode01 = GET_MODE (op01);
3411 /* Find out the number of elements of each operand. */
3412 if (VECTOR_MODE_P (mode00))
3414 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3415 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3417 else
3418 n_elts00 = 1;
3420 if (VECTOR_MODE_P (mode01))
3422 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3423 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3425 else
3426 n_elts01 = 1;
3428 gcc_assert (n_elts == n_elts00 + n_elts01);
3430 /* Select correct operand of VEC_CONCAT
3431 and adjust selector. */
3432 if (elem < n_elts01)
3433 tmp_op = op00;
3434 else
3436 tmp_op = op01;
3437 elem -= n_elts00;
3440 else
3441 tmp_op = op0;
3443 vec = rtvec_alloc (1);
3444 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3446 tmp = gen_rtx_fmt_ee (code, mode,
3447 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3448 return tmp;
3450 if (GET_CODE (trueop0) == VEC_DUPLICATE
3451 && GET_MODE (XEXP (trueop0, 0)) == mode)
3452 return XEXP (trueop0, 0);
3454 else
3456 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3457 gcc_assert (GET_MODE_INNER (mode)
3458 == GET_MODE_INNER (GET_MODE (trueop0)));
3459 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3461 if (GET_CODE (trueop0) == CONST_VECTOR)
3463 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3464 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3465 rtvec v = rtvec_alloc (n_elts);
3466 unsigned int i;
3468 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3469 for (i = 0; i < n_elts; i++)
3471 rtx x = XVECEXP (trueop1, 0, i);
3473 gcc_assert (CONST_INT_P (x));
3474 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3475 INTVAL (x));
3478 return gen_rtx_CONST_VECTOR (mode, v);
3481 /* Recognize the identity. */
3482 if (GET_MODE (trueop0) == mode)
3484 bool maybe_ident = true;
3485 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3487 rtx j = XVECEXP (trueop1, 0, i);
3488 if (!CONST_INT_P (j) || INTVAL (j) != i)
3490 maybe_ident = false;
3491 break;
3494 if (maybe_ident)
3495 return trueop0;
3498 /* If we build {a,b} then permute it, build the result directly. */
3499 if (XVECLEN (trueop1, 0) == 2
3500 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3501 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3502 && GET_CODE (trueop0) == VEC_CONCAT
3503 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3504 && GET_MODE (XEXP (trueop0, 0)) == mode
3505 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3506 && GET_MODE (XEXP (trueop0, 1)) == mode)
3508 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3509 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3510 rtx subop0, subop1;
3512 gcc_assert (i0 < 4 && i1 < 4);
3513 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3514 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3516 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3519 if (XVECLEN (trueop1, 0) == 2
3520 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3521 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3522 && GET_CODE (trueop0) == VEC_CONCAT
3523 && GET_MODE (trueop0) == mode)
3525 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3526 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3527 rtx subop0, subop1;
3529 gcc_assert (i0 < 2 && i1 < 2);
3530 subop0 = XEXP (trueop0, i0);
3531 subop1 = XEXP (trueop0, i1);
3533 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3537 if (XVECLEN (trueop1, 0) == 1
3538 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3539 && GET_CODE (trueop0) == VEC_CONCAT)
3541 rtx vec = trueop0;
3542 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3544 /* Try to find the element in the VEC_CONCAT. */
3545 while (GET_MODE (vec) != mode
3546 && GET_CODE (vec) == VEC_CONCAT)
3548 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3549 if (offset < vec_size)
3550 vec = XEXP (vec, 0);
3551 else
3553 offset -= vec_size;
3554 vec = XEXP (vec, 1);
3556 vec = avoid_constant_pool_reference (vec);
3559 if (GET_MODE (vec) == mode)
3560 return vec;
3563 /* If we select elements in a vec_merge that all come from the same
3564 operand, select from that operand directly. */
3565 if (GET_CODE (op0) == VEC_MERGE)
3567 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3568 if (CONST_INT_P (trueop02))
3570 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3571 bool all_operand0 = true;
3572 bool all_operand1 = true;
3573 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3575 rtx j = XVECEXP (trueop1, 0, i);
3576 if (sel & (1 << UINTVAL (j)))
3577 all_operand1 = false;
3578 else
3579 all_operand0 = false;
3581 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3582 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3583 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3584 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3588 return 0;
3589 case VEC_CONCAT:
3591 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3592 ? GET_MODE (trueop0)
3593 : GET_MODE_INNER (mode));
3594 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3595 ? GET_MODE (trueop1)
3596 : GET_MODE_INNER (mode));
3598 gcc_assert (VECTOR_MODE_P (mode));
3599 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3600 == GET_MODE_SIZE (mode));
3602 if (VECTOR_MODE_P (op0_mode))
3603 gcc_assert (GET_MODE_INNER (mode)
3604 == GET_MODE_INNER (op0_mode));
3605 else
3606 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3608 if (VECTOR_MODE_P (op1_mode))
3609 gcc_assert (GET_MODE_INNER (mode)
3610 == GET_MODE_INNER (op1_mode));
3611 else
3612 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3614 if ((GET_CODE (trueop0) == CONST_VECTOR
3615 || CONST_SCALAR_INT_P (trueop0)
3616 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3617 && (GET_CODE (trueop1) == CONST_VECTOR
3618 || CONST_SCALAR_INT_P (trueop1)
3619 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3621 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3622 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3623 rtvec v = rtvec_alloc (n_elts);
3624 unsigned int i;
3625 unsigned in_n_elts = 1;
3627 if (VECTOR_MODE_P (op0_mode))
3628 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3629 for (i = 0; i < n_elts; i++)
3631 if (i < in_n_elts)
3633 if (!VECTOR_MODE_P (op0_mode))
3634 RTVEC_ELT (v, i) = trueop0;
3635 else
3636 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3638 else
3640 if (!VECTOR_MODE_P (op1_mode))
3641 RTVEC_ELT (v, i) = trueop1;
3642 else
3643 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3644 i - in_n_elts);
3648 return gen_rtx_CONST_VECTOR (mode, v);
3651 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3652 Restrict the transformation to avoid generating a VEC_SELECT with a
3653 mode unrelated to its operand. */
3654 if (GET_CODE (trueop0) == VEC_SELECT
3655 && GET_CODE (trueop1) == VEC_SELECT
3656 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3657 && GET_MODE (XEXP (trueop0, 0)) == mode)
3659 rtx par0 = XEXP (trueop0, 1);
3660 rtx par1 = XEXP (trueop1, 1);
3661 int len0 = XVECLEN (par0, 0);
3662 int len1 = XVECLEN (par1, 0);
3663 rtvec vec = rtvec_alloc (len0 + len1);
3664 for (int i = 0; i < len0; i++)
3665 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3666 for (int i = 0; i < len1; i++)
3667 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3668 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3669 gen_rtx_PARALLEL (VOIDmode, vec));
3672 return 0;
3674 default:
3675 gcc_unreachable ();
3678 return 0;
3681 rtx
3682 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3683 rtx op0, rtx op1)
3685 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3686 HOST_WIDE_INT val;
3687 unsigned int width = GET_MODE_PRECISION (mode);
3689 if (VECTOR_MODE_P (mode)
3690 && code != VEC_CONCAT
3691 && GET_CODE (op0) == CONST_VECTOR
3692 && GET_CODE (op1) == CONST_VECTOR)
3694 unsigned n_elts = GET_MODE_NUNITS (mode);
3695 enum machine_mode op0mode = GET_MODE (op0);
3696 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3697 enum machine_mode op1mode = GET_MODE (op1);
3698 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3699 rtvec v = rtvec_alloc (n_elts);
3700 unsigned int i;
3702 gcc_assert (op0_n_elts == n_elts);
3703 gcc_assert (op1_n_elts == n_elts);
3704 for (i = 0; i < n_elts; i++)
3706 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3707 CONST_VECTOR_ELT (op0, i),
3708 CONST_VECTOR_ELT (op1, i));
3709 if (!x)
3710 return 0;
3711 RTVEC_ELT (v, i) = x;
3714 return gen_rtx_CONST_VECTOR (mode, v);
3717 if (VECTOR_MODE_P (mode)
3718 && code == VEC_CONCAT
3719 && (CONST_SCALAR_INT_P (op0)
3720 || GET_CODE (op0) == CONST_FIXED
3721 || CONST_DOUBLE_AS_FLOAT_P (op0))
3722 && (CONST_SCALAR_INT_P (op1)
3723 || CONST_DOUBLE_AS_FLOAT_P (op1)
3724 || GET_CODE (op1) == CONST_FIXED))
3726 unsigned n_elts = GET_MODE_NUNITS (mode);
3727 rtvec v = rtvec_alloc (n_elts);
3729 gcc_assert (n_elts >= 2);
3730 if (n_elts == 2)
3732 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3733 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3735 RTVEC_ELT (v, 0) = op0;
3736 RTVEC_ELT (v, 1) = op1;
3738 else
3740 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3741 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3742 unsigned i;
3744 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3745 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3746 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3748 for (i = 0; i < op0_n_elts; ++i)
3749 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3750 for (i = 0; i < op1_n_elts; ++i)
3751 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3754 return gen_rtx_CONST_VECTOR (mode, v);
3757 if (SCALAR_FLOAT_MODE_P (mode)
3758 && CONST_DOUBLE_AS_FLOAT_P (op0)
3759 && CONST_DOUBLE_AS_FLOAT_P (op1)
3760 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3762 if (code == AND
3763 || code == IOR
3764 || code == XOR)
3766 long tmp0[4];
3767 long tmp1[4];
3768 REAL_VALUE_TYPE r;
3769 int i;
3771 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3772 GET_MODE (op0));
3773 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3774 GET_MODE (op1));
3775 for (i = 0; i < 4; i++)
3777 switch (code)
3779 case AND:
3780 tmp0[i] &= tmp1[i];
3781 break;
3782 case IOR:
3783 tmp0[i] |= tmp1[i];
3784 break;
3785 case XOR:
3786 tmp0[i] ^= tmp1[i];
3787 break;
3788 default:
3789 gcc_unreachable ();
3792 real_from_target (&r, tmp0, mode);
3793 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3795 else
3797 REAL_VALUE_TYPE f0, f1, value, result;
3798 bool inexact;
3800 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3801 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3802 real_convert (&f0, mode, &f0);
3803 real_convert (&f1, mode, &f1);
3805 if (HONOR_SNANS (mode)
3806 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3807 return 0;
3809 if (code == DIV
3810 && REAL_VALUES_EQUAL (f1, dconst0)
3811 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3812 return 0;
3814 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3815 && flag_trapping_math
3816 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3818 int s0 = REAL_VALUE_NEGATIVE (f0);
3819 int s1 = REAL_VALUE_NEGATIVE (f1);
3821 switch (code)
3823 case PLUS:
3824 /* Inf + -Inf = NaN plus exception. */
3825 if (s0 != s1)
3826 return 0;
3827 break;
3828 case MINUS:
3829 /* Inf - Inf = NaN plus exception. */
3830 if (s0 == s1)
3831 return 0;
3832 break;
3833 case DIV:
3834 /* Inf / Inf = NaN plus exception. */
3835 return 0;
3836 default:
3837 break;
3841 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3842 && flag_trapping_math
3843 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3844 || (REAL_VALUE_ISINF (f1)
3845 && REAL_VALUES_EQUAL (f0, dconst0))))
3846 /* Inf * 0 = NaN plus exception. */
3847 return 0;
3849 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3850 &f0, &f1);
3851 real_convert (&result, mode, &value);
3853 /* Don't constant fold this floating point operation if
3854 the result has overflowed and flag_trapping_math is set. */
3856 if (flag_trapping_math
3857 && MODE_HAS_INFINITIES (mode)
3858 && REAL_VALUE_ISINF (result)
3859 && !REAL_VALUE_ISINF (f0)
3860 && !REAL_VALUE_ISINF (f1))
3861 /* Overflow plus exception. */
3862 return 0;
3864 /* Don't constant fold this floating point operation if the
3865 result may depend upon the run-time rounding mode and
3866 flag_rounding_math is set, or if GCC's software emulation
3867 is unable to accurately represent the result. */
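/* Note (editorial): MODE_COMPOSITE_P covers formats built from pairs of
   values (e.g. IBM extended double); their emulation cannot represent
   every result exactly, hence the extra check below.  */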
3869 if ((flag_rounding_math
3870 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3871 && (inexact || !real_identical (&result, &value)))
3872 return NULL_RTX;
3874 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3878 /* We can fold some multi-word operations. */
3879 if (GET_MODE_CLASS (mode) == MODE_INT
3880 && width == HOST_BITS_PER_DOUBLE_INT
3881 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3882 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3884 double_int o0, o1, res, tmp;
3885 bool overflow;
3887 o0 = rtx_to_double_int (op0);
3888 o1 = rtx_to_double_int (op1);
3890 switch (code)
3892 case MINUS:
3893 /* A - B == A + (-B). */
3894 o1 = -o1;
3896 /* Fall through.... */
3898 case PLUS:
3899 res = o0 + o1;
3900 break;
3902 case MULT:
3903 res = o0 * o1;
3904 break;
3906 case DIV:
3907 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3908 &tmp, &overflow);
3909 if (overflow)
3910 return 0;
3911 break;
3913 case MOD:
3914 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3915 &res, &overflow);
3916 if (overflow)
3917 return 0;
3918 break;
3920 case UDIV:
3921 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3922 &tmp, &overflow);
3923 if (overflow)
3924 return 0;
3925 break;
3927 case UMOD:
3928 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3929 &res, &overflow);
3930 if (overflow)
3931 return 0;
3932 break;
3934 case AND:
3935 res = o0 & o1;
3936 break;
3938 case IOR:
3939 res = o0 | o1;
3940 break;
3942 case XOR:
3943 res = o0 ^ o1;
3944 break;
3946 case SMIN:
3947 res = o0.smin (o1);
3948 break;
3950 case SMAX:
3951 res = o0.smax (o1);
3952 break;
3954 case UMIN:
3955 res = o0.umin (o1);
3956 break;
3958 case UMAX:
3959 res = o0.umax (o1);
3960 break;
3962 case LSHIFTRT: case ASHIFTRT:
3963 case ASHIFT:
3964 case ROTATE: case ROTATERT:
3966 unsigned HOST_WIDE_INT cnt;
3968 if (SHIFT_COUNT_TRUNCATED)
3970 o1.high = 0;
3971 o1.low &= GET_MODE_PRECISION (mode) - 1;
3974 if (!o1.fits_uhwi ()
3975 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3976 return 0;
3978 cnt = o1.to_uhwi ();
3979 unsigned short prec = GET_MODE_PRECISION (mode);
3981 if (code == LSHIFTRT || code == ASHIFTRT)
3982 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3983 else if (code == ASHIFT)
3984 res = o0.alshift (cnt, prec);
3985 else if (code == ROTATE)
3986 res = o0.lrotate (cnt, prec);
3987 else /* code == ROTATERT */
3988 res = o0.rrotate (cnt, prec);
3990 break;
3992 default:
3993 return 0;
3996 return immed_double_int_const (res, mode);
3999 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4000 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4002 /* Get the integer argument values in two forms:
4003 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4005 arg0 = INTVAL (op0);
4006 arg1 = INTVAL (op1);
4008 if (width < HOST_BITS_PER_WIDE_INT)
4010 arg0 &= GET_MODE_MASK (mode);
4011 arg1 &= GET_MODE_MASK (mode);
4013 arg0s = arg0;
4014 if (val_signbit_known_set_p (mode, arg0s))
4015 arg0s |= ~GET_MODE_MASK (mode);
4017 arg1s = arg1;
4018 if (val_signbit_known_set_p (mode, arg1s))
4019 arg1s |= ~GET_MODE_MASK (mode);
4021 else
4023 arg0s = arg0;
4024 arg1s = arg1;
4027 /* Compute the value of the arithmetic. */
4029 switch (code)
4031 case PLUS:
4032 val = arg0s + arg1s;
4033 break;
4035 case MINUS:
4036 val = arg0s - arg1s;
4037 break;
4039 case MULT:
4040 val = arg0s * arg1s;
4041 break;
4043 case DIV:
4044 if (arg1s == 0
4045 || ((unsigned HOST_WIDE_INT) arg0s
4046 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4047 && arg1s == -1))
4048 return 0;
4049 val = arg0s / arg1s;
4050 break;
4052 case MOD:
4053 if (arg1s == 0
4054 || ((unsigned HOST_WIDE_INT) arg0s
4055 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4056 && arg1s == -1))
4057 return 0;
4058 val = arg0s % arg1s;
4059 break;
4061 case UDIV:
4062 if (arg1 == 0
4063 || ((unsigned HOST_WIDE_INT) arg0s
4064 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4065 && arg1s == -1))
4066 return 0;
4067 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4068 break;
4070 case UMOD:
4071 if (arg1 == 0
4072 || ((unsigned HOST_WIDE_INT) arg0s
4073 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4074 && arg1s == -1))
4075 return 0;
4076 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4077 break;
4079 case AND:
4080 val = arg0 & arg1;
4081 break;
4083 case IOR:
4084 val = arg0 | arg1;
4085 break;
4087 case XOR:
4088 val = arg0 ^ arg1;
4089 break;
4091 case LSHIFTRT:
4092 case ASHIFT:
4093 case ASHIFTRT:
4094 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4095 the value is in range. We can't return any old value for
4096 out-of-range arguments because either the middle-end (via
4097 shift_truncation_mask) or the back-end might be relying on
4098 target-specific knowledge. Nor can we rely on
4099 shift_truncation_mask, since the shift might not be part of an
4100 ashlM3, lshrM3 or ashrM3 instruction. */
4101 if (SHIFT_COUNT_TRUNCATED)
4102 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4103 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4104 return 0;
4106 val = (code == ASHIFT
4107 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4108 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4110 /* Sign-extend the result for arithmetic right shifts. */
4111 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4112 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4113 break;
4115 case ROTATERT:
4116 if (arg1 < 0)
4117 return 0;
4119 arg1 %= width;
4120 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4121 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4122 break;
4124 case ROTATE:
4125 if (arg1 < 0)
4126 return 0;
4128 arg1 %= width;
4129 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4130 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4131 break;
4133 case COMPARE:
4134 /* Do nothing here. */
4135 return 0;
4137 case SMIN:
4138 val = arg0s <= arg1s ? arg0s : arg1s;
4139 break;
4141 case UMIN:
4142 val = ((unsigned HOST_WIDE_INT) arg0
4143 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4144 break;
4146 case SMAX:
4147 val = arg0s > arg1s ? arg0s : arg1s;
4148 break;
4150 case UMAX:
4151 val = ((unsigned HOST_WIDE_INT) arg0
4152 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4153 break;
4155 case SS_PLUS:
4156 case US_PLUS:
4157 case SS_MINUS:
4158 case US_MINUS:
4159 case SS_MULT:
4160 case US_MULT:
4161 case SS_DIV:
4162 case US_DIV:
4163 case SS_ASHIFT:
4164 case US_ASHIFT:
4165 /* ??? There are simplifications that can be done. */
4166 return 0;
4168 default:
4169 gcc_unreachable ();
4172 return gen_int_mode (val, mode);
4175 return NULL_RTX;
4180 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4181 PLUS or MINUS.
4183 Rather than test for specific cases, we use a brute-force method:
4184 apply all possible simplifications until no more changes occur. Then
4185 we rebuild the operation. */
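/* For example (illustrative only): (plus (minus A B) (plus B C)) is
   flattened into the operand list {A, -B, B, C}; the B terms cancel in
   the pairwise simplification loop below, and the remaining terms are
   rebuilt as (plus A C).  */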
4187 struct simplify_plus_minus_op_data
4189 rtx op;
4190 short neg;
4193 static bool
4194 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4196 int result;
4198 result = (commutative_operand_precedence (y)
4199 - commutative_operand_precedence (x));
4200 if (result)
4201 return result > 0;
4203 /* Group together equal REGs to do more simplification. */
4204 if (REG_P (x) && REG_P (y))
4205 return REGNO (x) > REGNO (y);
4206 else
4207 return false;
4210 static rtx
4211 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4212 rtx op1)
4214 struct simplify_plus_minus_op_data ops[8];
4215 rtx result, tem;
4216 int n_ops = 2, input_ops = 2;
4217 int changed, n_constants = 0, canonicalized = 0;
4218 int i, j;
4220 memset (ops, 0, sizeof ops);
4222 /* Set up the two operands and then expand them until nothing has been
4223 changed. If we run out of room in our array, give up; this should
4224 almost never happen. */
4226 ops[0].op = op0;
4227 ops[0].neg = 0;
4228 ops[1].op = op1;
4229 ops[1].neg = (code == MINUS);
4233 changed = 0;
4235 for (i = 0; i < n_ops; i++)
4237 rtx this_op = ops[i].op;
4238 int this_neg = ops[i].neg;
4239 enum rtx_code this_code = GET_CODE (this_op);
4241 switch (this_code)
4243 case PLUS:
4244 case MINUS:
4245 if (n_ops == 7)
4246 return NULL_RTX;
4248 ops[n_ops].op = XEXP (this_op, 1);
4249 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4250 n_ops++;
4252 ops[i].op = XEXP (this_op, 0);
4253 input_ops++;
4254 changed = 1;
4255 canonicalized |= this_neg;
4256 break;
4258 case NEG:
4259 ops[i].op = XEXP (this_op, 0);
4260 ops[i].neg = ! this_neg;
4261 changed = 1;
4262 canonicalized = 1;
4263 break;
4265 case CONST:
4266 if (n_ops < 7
4267 && GET_CODE (XEXP (this_op, 0)) == PLUS
4268 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4269 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4271 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4272 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4273 ops[n_ops].neg = this_neg;
4274 n_ops++;
4275 changed = 1;
4276 canonicalized = 1;
4278 break;
4280 case NOT:
4281 /* ~a -> (-a - 1) */
4282 if (n_ops != 7)
4284 ops[n_ops].op = CONSTM1_RTX (mode);
4285 ops[n_ops++].neg = this_neg;
4286 ops[i].op = XEXP (this_op, 0);
4287 ops[i].neg = !this_neg;
4288 changed = 1;
4289 canonicalized = 1;
4291 break;
4293 case CONST_INT:
4294 n_constants++;
4295 if (this_neg)
4297 ops[i].op = neg_const_int (mode, this_op);
4298 ops[i].neg = 0;
4299 changed = 1;
4300 canonicalized = 1;
4302 break;
4304 default:
4305 break;
4309 while (changed);
4311 if (n_constants > 1)
4312 canonicalized = 1;
4314 gcc_assert (n_ops >= 2);
4316 /* If we only have two operands, we can avoid the loops. */
4317 if (n_ops == 2)
4319 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4320 rtx lhs, rhs;
4322 /* Get the two operands. Be careful with the order, especially for
4323 the cases where code == MINUS. */
4324 if (ops[0].neg && ops[1].neg)
4326 lhs = gen_rtx_NEG (mode, ops[0].op);
4327 rhs = ops[1].op;
4329 else if (ops[0].neg)
4331 lhs = ops[1].op;
4332 rhs = ops[0].op;
4334 else
4336 lhs = ops[0].op;
4337 rhs = ops[1].op;
4340 return simplify_const_binary_operation (code, mode, lhs, rhs);
4343 /* Now simplify each pair of operands until nothing changes. */
4346 /* Insertion sort is good enough for an eight-element array. */
4347 for (i = 1; i < n_ops; i++)
4349 struct simplify_plus_minus_op_data save;
4350 j = i - 1;
4351 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4352 continue;
4354 canonicalized = 1;
4355 save = ops[i];
4357 ops[j + 1] = ops[j];
4358 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4359 ops[j + 1] = save;
4362 changed = 0;
4363 for (i = n_ops - 1; i > 0; i--)
4364 for (j = i - 1; j >= 0; j--)
4366 rtx lhs = ops[j].op, rhs = ops[i].op;
4367 int lneg = ops[j].neg, rneg = ops[i].neg;
4369 if (lhs != 0 && rhs != 0)
4371 enum rtx_code ncode = PLUS;
4373 if (lneg != rneg)
4375 ncode = MINUS;
4376 if (lneg)
4377 tem = lhs, lhs = rhs, rhs = tem;
4379 else if (swap_commutative_operands_p (lhs, rhs))
4380 tem = lhs, lhs = rhs, rhs = tem;
4382 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4383 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4385 rtx tem_lhs, tem_rhs;
4387 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4388 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4389 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4391 if (tem && !CONSTANT_P (tem))
4392 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4394 else
4395 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4397 /* Reject "simplifications" that just wrap the two
4398 arguments in a CONST. Failure to do so can result
4399 in infinite recursion with simplify_binary_operation
4400 when it calls us to simplify CONST operations. */
4401 if (tem
4402 && ! (GET_CODE (tem) == CONST
4403 && GET_CODE (XEXP (tem, 0)) == ncode
4404 && XEXP (XEXP (tem, 0), 0) == lhs
4405 && XEXP (XEXP (tem, 0), 1) == rhs))
4407 lneg &= rneg;
4408 if (GET_CODE (tem) == NEG)
4409 tem = XEXP (tem, 0), lneg = !lneg;
4410 if (CONST_INT_P (tem) && lneg)
4411 tem = neg_const_int (mode, tem), lneg = 0;
4413 ops[i].op = tem;
4414 ops[i].neg = lneg;
4415 ops[j].op = NULL_RTX;
4416 changed = 1;
4417 canonicalized = 1;
4422 /* If nothing changed, fail. */
4423 if (!canonicalized)
4424 return NULL_RTX;
4426 /* Pack all the operands to the lower-numbered entries. */
4427 for (i = 0, j = 0; j < n_ops; j++)
4428 if (ops[j].op)
4430 ops[i] = ops[j];
4431 i++;
4433 n_ops = i;
4435 while (changed);
4437 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4438 if (n_ops == 2
4439 && CONST_INT_P (ops[1].op)
4440 && CONSTANT_P (ops[0].op)
4441 && ops[0].neg)
4442 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4444 /* We suppressed creation of trivial CONST expressions in the
4445 combination loop to avoid recursion. Create one manually now.
4446 The combination loop should have ensured that there is exactly
4447 one CONST_INT, and the sort will have ensured that it is last
4448 in the array and that any other constant will be next-to-last. */
4450 if (n_ops > 1
4451 && CONST_INT_P (ops[n_ops - 1].op)
4452 && CONSTANT_P (ops[n_ops - 2].op))
4454 rtx value = ops[n_ops - 1].op;
4455 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4456 value = neg_const_int (mode, value);
4457 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4458 INTVAL (value));
4459 n_ops--;
4462 /* Put a non-negated operand first, if possible. */
4464 for (i = 0; i < n_ops && ops[i].neg; i++)
4465 continue;
4466 if (i == n_ops)
4467 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4468 else if (i != 0)
4470 tem = ops[0].op;
4471 ops[0] = ops[i];
4472 ops[i].op = tem;
4473 ops[i].neg = 1;
4476 /* Now make the result by performing the requested operations. */
4477 result = ops[0].op;
4478 for (i = 1; i < n_ops; i++)
4479 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4480 mode, result, ops[i].op);
4482 return result;
4485 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4486 static bool
4487 plus_minus_operand_p (const_rtx x)
4489 return GET_CODE (x) == PLUS
4490 || GET_CODE (x) == MINUS
4491 || (GET_CODE (x) == CONST
4492 && GET_CODE (XEXP (x, 0)) == PLUS
4493 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4494 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4497 /* Like simplify_binary_operation except used for relational operators.
4498 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4499 not both be VOIDmode as well.
4501 CMP_MODE specifies the mode in which the comparison is done, so it is
4502 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4503 the operands or, if both are VOIDmode, the operands are compared in
4504 "infinite precision". */
4506 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4507 enum machine_mode cmp_mode, rtx op0, rtx op1)
4509 rtx tem, trueop0, trueop1;
4511 if (cmp_mode == VOIDmode)
4512 cmp_mode = GET_MODE (op0);
4513 if (cmp_mode == VOIDmode)
4514 cmp_mode = GET_MODE (op1);
4516 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4517 if (tem)
4519 if (SCALAR_FLOAT_MODE_P (mode))
4521 if (tem == const0_rtx)
4522 return CONST0_RTX (mode);
4523 #ifdef FLOAT_STORE_FLAG_VALUE
4525 REAL_VALUE_TYPE val;
4526 val = FLOAT_STORE_FLAG_VALUE (mode);
4527 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4529 #else
4530 return NULL_RTX;
4531 #endif
4533 if (VECTOR_MODE_P (mode))
4535 if (tem == const0_rtx)
4536 return CONST0_RTX (mode);
4537 #ifdef VECTOR_STORE_FLAG_VALUE
4539 int i, units;
4540 rtvec v;
4542 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4543 if (val == NULL_RTX)
4544 return NULL_RTX;
4545 if (val == const1_rtx)
4546 return CONST1_RTX (mode);
4548 units = GET_MODE_NUNITS (mode);
4549 v = rtvec_alloc (units);
4550 for (i = 0; i < units; i++)
4551 RTVEC_ELT (v, i) = val;
4552 return gen_rtx_raw_CONST_VECTOR (mode, v);
4554 #else
4555 return NULL_RTX;
4556 #endif
4559 return tem;
4562 /* For the following tests, ensure const0_rtx is op1. */
4563 if (swap_commutative_operands_p (op0, op1)
4564 || (op0 == const0_rtx && op1 != const0_rtx))
4565 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4567 /* If op0 is a compare, extract the comparison arguments from it. */
4568 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4569 return simplify_gen_relational (code, mode, VOIDmode,
4570 XEXP (op0, 0), XEXP (op0, 1));
4572 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4573 || CC0_P (op0))
4574 return NULL_RTX;
4576 trueop0 = avoid_constant_pool_reference (op0);
4577 trueop1 = avoid_constant_pool_reference (op1);
4578 return simplify_relational_operation_1 (code, mode, cmp_mode,
4579 trueop0, trueop1);
4582 /* This part of simplify_relational_operation is only used when CMP_MODE
4583 is not in class MODE_CC (i.e. it is a real comparison).
4585 MODE is the mode of the result, while CMP_MODE specifies the mode
4586 in which the comparison is done, so it is the mode of the operands. */
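/* For example (illustrative only): (eq (ltu x y) (const_int 0)) becomes
   (geu x y) via the reversed comparison below, and (gtu x (const_int 0))
   is canonicalized to (ne x (const_int 0)).  */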
4588 static rtx
4589 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4590 enum machine_mode cmp_mode, rtx op0, rtx op1)
4592 enum rtx_code op0code = GET_CODE (op0);
4594 if (op1 == const0_rtx && COMPARISON_P (op0))
4596 /* If op0 is a comparison, extract the comparison arguments
4597 from it. */
4598 if (code == NE)
4600 if (GET_MODE (op0) == mode)
4601 return simplify_rtx (op0);
4602 else
4603 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4604 XEXP (op0, 0), XEXP (op0, 1));
4606 else if (code == EQ)
4608 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4609 if (new_code != UNKNOWN)
4610 return simplify_gen_relational (new_code, mode, VOIDmode,
4611 XEXP (op0, 0), XEXP (op0, 1));
4615 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4616 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4617 if ((code == LTU || code == GEU)
4618 && GET_CODE (op0) == PLUS
4619 && CONST_INT_P (XEXP (op0, 1))
4620 && (rtx_equal_p (op1, XEXP (op0, 0))
4621 || rtx_equal_p (op1, XEXP (op0, 1)))
4622 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4623 && XEXP (op0, 1) != const0_rtx)
4625 rtx new_cmp
4626 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4627 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4628 cmp_mode, XEXP (op0, 0), new_cmp);
4631 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4632 if ((code == LTU || code == GEU)
4633 && GET_CODE (op0) == PLUS
4634 && rtx_equal_p (op1, XEXP (op0, 1))
4635 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4636 && !rtx_equal_p (op1, XEXP (op0, 0)))
4637 return simplify_gen_relational (code, mode, cmp_mode, op0,
4638 copy_rtx (XEXP (op0, 0)));
4640 if (op1 == const0_rtx)
4642 /* Canonicalize (GTU x 0) as (NE x 0). */
4643 if (code == GTU)
4644 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4645 /* Canonicalize (LEU x 0) as (EQ x 0). */
4646 if (code == LEU)
4647 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4649 else if (op1 == const1_rtx)
4651 switch (code)
4653 case GE:
4654 /* Canonicalize (GE x 1) as (GT x 0). */
4655 return simplify_gen_relational (GT, mode, cmp_mode,
4656 op0, const0_rtx);
4657 case GEU:
4658 /* Canonicalize (GEU x 1) as (NE x 0). */
4659 return simplify_gen_relational (NE, mode, cmp_mode,
4660 op0, const0_rtx);
4661 case LT:
4662 /* Canonicalize (LT x 1) as (LE x 0). */
4663 return simplify_gen_relational (LE, mode, cmp_mode,
4664 op0, const0_rtx);
4665 case LTU:
4666 /* Canonicalize (LTU x 1) as (EQ x 0). */
4667 return simplify_gen_relational (EQ, mode, cmp_mode,
4668 op0, const0_rtx);
4669 default:
4670 break;
4673 else if (op1 == constm1_rtx)
4675 /* Canonicalize (LE x -1) as (LT x 0). */
4676 if (code == LE)
4677 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4678 /* Canonicalize (GT x -1) as (GE x 0). */
4679 if (code == GT)
4680 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4683 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4684 if ((code == EQ || code == NE)
4685 && (op0code == PLUS || op0code == MINUS)
4686 && CONSTANT_P (op1)
4687 && CONSTANT_P (XEXP (op0, 1))
4688 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4690 rtx x = XEXP (op0, 0);
4691 rtx c = XEXP (op0, 1);
4692 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4693 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4695 /* Detect an infinite recursion condition, where this simplification
4696 would oscillate between:
4697 A + B == C <---> C - B == A,
4698 where A, B, and C are all constant but non-simplifiable expressions,
4699 usually SYMBOL_REFs. */
4700 if (GET_CODE (tem) == invcode
4701 && CONSTANT_P (x)
4702 && rtx_equal_p (c, XEXP (tem, 1)))
4703 return NULL_RTX;
4705 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4708 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4709 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4710 if (code == NE
4711 && op1 == const0_rtx
4712 && GET_MODE_CLASS (mode) == MODE_INT
4713 && cmp_mode != VOIDmode
4714 /* ??? Work-around BImode bugs in the ia64 backend. */
4715 && mode != BImode
4716 && cmp_mode != BImode
4717 && nonzero_bits (op0, cmp_mode) == 1
4718 && STORE_FLAG_VALUE == 1)
4719 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4720 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4721 : lowpart_subreg (mode, op0, cmp_mode);
4723 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4724 if ((code == EQ || code == NE)
4725 && op1 == const0_rtx
4726 && op0code == XOR)
4727 return simplify_gen_relational (code, mode, cmp_mode,
4728 XEXP (op0, 0), XEXP (op0, 1));
4730 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4731 if ((code == EQ || code == NE)
4732 && op0code == XOR
4733 && rtx_equal_p (XEXP (op0, 0), op1)
4734 && !side_effects_p (XEXP (op0, 0)))
4735 return simplify_gen_relational (code, mode, cmp_mode,
4736 XEXP (op0, 1), const0_rtx);
4738 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4739 if ((code == EQ || code == NE)
4740 && op0code == XOR
4741 && rtx_equal_p (XEXP (op0, 1), op1)
4742 && !side_effects_p (XEXP (op0, 1)))
4743 return simplify_gen_relational (code, mode, cmp_mode,
4744 XEXP (op0, 0), const0_rtx);
4746 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4747 if ((code == EQ || code == NE)
4748 && op0code == XOR
4749 && CONST_SCALAR_INT_P (op1)
4750 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4751 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4752 simplify_gen_binary (XOR, cmp_mode,
4753 XEXP (op0, 1), op1));
4755 if (op0code == POPCOUNT && op1 == const0_rtx)
4756 switch (code)
4758 case EQ:
4759 case LE:
4760 case LEU:
4761 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4762 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4763 XEXP (op0, 0), const0_rtx);
4765 case NE:
4766 case GT:
4767 case GTU:
4768 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4769 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4770 XEXP (op0, 0), const0_rtx);
4772 default:
4773 break;
4776 return NULL_RTX;
4779 enum
4781 CMP_EQ = 1,
4782 CMP_LT = 2,
4783 CMP_GT = 4,
4784 CMP_LTU = 8,
4785 CMP_GTU = 16
4789 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4790 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4791 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4792 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4793 For floating-point comparisons, assume that the operands were ordered. */
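/* For example, comparison_result (LE, CMP_GT) yields const0_rtx: the
   operands are known to compare strictly greater, so LE is false.  */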
4795 static rtx
4796 comparison_result (enum rtx_code code, int known_results)
4798 switch (code)
4800 case EQ:
4801 case UNEQ:
4802 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4803 case NE:
4804 case LTGT:
4805 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4807 case LT:
4808 case UNLT:
4809 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4810 case GE:
4811 case UNGE:
4812 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4814 case GT:
4815 case UNGT:
4816 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4817 case LE:
4818 case UNLE:
4819 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4821 case LTU:
4822 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4823 case GEU:
4824 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4826 case GTU:
4827 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4828 case LEU:
4829 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4831 case ORDERED:
4832 return const_true_rtx;
4833 case UNORDERED:
4834 return const0_rtx;
4835 default:
4836 gcc_unreachable ();
4840 /* Check if the given comparison (done in the given MODE) is actually a
4841 tautology or a contradiction.
4842 If no simplification is possible, this function returns zero.
4843 Otherwise, it returns either const_true_rtx or const0_rtx. */
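/* For example, for an ordinary integer mode (ltu x (const_int 0)) can
   never be true and folds to const0_rtx, while (geu x (const_int 0))
   folds to const_true_rtx.  */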
4846 simplify_const_relational_operation (enum rtx_code code,
4847 enum machine_mode mode,
4848 rtx op0, rtx op1)
4850 rtx tem;
4851 rtx trueop0;
4852 rtx trueop1;
4854 gcc_assert (mode != VOIDmode
4855 || (GET_MODE (op0) == VOIDmode
4856 && GET_MODE (op1) == VOIDmode));
4858 /* If op0 is a compare, extract the comparison arguments from it. */
4859 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4861 op1 = XEXP (op0, 1);
4862 op0 = XEXP (op0, 0);
4864 if (GET_MODE (op0) != VOIDmode)
4865 mode = GET_MODE (op0);
4866 else if (GET_MODE (op1) != VOIDmode)
4867 mode = GET_MODE (op1);
4868 else
4869 return 0;
4872 /* We can't simplify MODE_CC values since we don't know what the
4873 actual comparison is. */
4874 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4875 return 0;
4877 /* Make sure the constant is second. */
4878 if (swap_commutative_operands_p (op0, op1))
4880 tem = op0, op0 = op1, op1 = tem;
4881 code = swap_condition (code);
4884 trueop0 = avoid_constant_pool_reference (op0);
4885 trueop1 = avoid_constant_pool_reference (op1);
4887 /* For integer comparisons of A and B maybe we can simplify A - B and can
4888 then simplify a comparison of that with zero. If A and B are both either
4889 a register or a CONST_INT, this can't help; testing for these cases will
4890 prevent infinite recursion here and speed things up.
4892 We can only do this for EQ and NE comparisons as otherwise we may
4893 lose or introduce overflow which we cannot disregard as undefined,
4894 since we do not know the signedness of the operation on either the
4895 left or the right hand side of the comparison. */
4897 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4898 && (code == EQ || code == NE)
4899 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4900 && (REG_P (op1) || CONST_INT_P (trueop1)))
4901 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4902 /* We cannot do this if tem is a nonzero address. */
4903 && ! nonzero_address_p (tem))
4904 return simplify_const_relational_operation (signed_condition (code),
4905 mode, tem, const0_rtx);
4907 if (! HONOR_NANS (mode) && code == ORDERED)
4908 return const_true_rtx;
4910 if (! HONOR_NANS (mode) && code == UNORDERED)
4911 return const0_rtx;
4913 /* For modes without NaNs, if the two operands are equal, we know the
4914 result except if they have side-effects. Even with NaNs we know
4915 the result of unordered comparisons and, if signaling NaNs are
4916 irrelevant, also the result of LT/GT/LTGT. */
4917 if ((! HONOR_NANS (GET_MODE (trueop0))
4918 || code == UNEQ || code == UNLE || code == UNGE
4919 || ((code == LT || code == GT || code == LTGT)
4920 && ! HONOR_SNANS (GET_MODE (trueop0))))
4921 && rtx_equal_p (trueop0, trueop1)
4922 && ! side_effects_p (trueop0))
4923 return comparison_result (code, CMP_EQ);
4925 /* If the operands are floating-point constants, see if we can fold
4926 the result. */
4927 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4928 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4929 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4931 REAL_VALUE_TYPE d0, d1;
4933 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4934 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4936 /* Comparisons are unordered iff at least one of the values is NaN. */
4937 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4938 switch (code)
4940 case UNEQ:
4941 case UNLT:
4942 case UNGT:
4943 case UNLE:
4944 case UNGE:
4945 case NE:
4946 case UNORDERED:
4947 return const_true_rtx;
4948 case EQ:
4949 case LT:
4950 case GT:
4951 case LE:
4952 case GE:
4953 case LTGT:
4954 case ORDERED:
4955 return const0_rtx;
4956 default:
4957 return 0;
4960 return comparison_result (code,
4961 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4962 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4965 /* Otherwise, see if the operands are both integers. */
4966 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4967 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4968 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4970 int width = GET_MODE_PRECISION (mode);
4971 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4972 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4974 /* Get the two words comprising each integer constant. */
4975 if (CONST_DOUBLE_AS_INT_P (trueop0))
4977 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4978 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4980 else
4982 l0u = l0s = INTVAL (trueop0);
4983 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4986 if (CONST_DOUBLE_AS_INT_P (trueop1))
4988 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4989 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4991 else
4993 l1u = l1s = INTVAL (trueop1);
4994 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4997 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4998 we have to sign or zero-extend the values. */
4999 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5001 l0u &= GET_MODE_MASK (mode);
5002 l1u &= GET_MODE_MASK (mode);
5004 if (val_signbit_known_set_p (mode, l0s))
5005 l0s |= ~GET_MODE_MASK (mode);
5007 if (val_signbit_known_set_p (mode, l1s))
5008 l1s |= ~GET_MODE_MASK (mode);
5010 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5011 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5013 if (h0u == h1u && l0u == l1u)
5014 return comparison_result (code, CMP_EQ);
5015 else
5017 int cr;
5018 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5019 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5020 return comparison_result (code, cr);
5024 /* Optimize comparisons with upper and lower bounds. */
5025 if (HWI_COMPUTABLE_MODE_P (mode)
5026 && CONST_INT_P (trueop1))
5028 int sign;
5029 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5030 HOST_WIDE_INT val = INTVAL (trueop1);
5031 HOST_WIDE_INT mmin, mmax;
5033 if (code == GEU
5034 || code == LEU
5035 || code == GTU
5036 || code == LTU)
5037 sign = 0;
5038 else
5039 sign = 1;
5041 /* Get a reduced range if the sign bit is zero. */
5042 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5044 mmin = 0;
5045 mmax = nonzero;
5047 else
5049 rtx mmin_rtx, mmax_rtx;
5050 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5052 mmin = INTVAL (mmin_rtx);
5053 mmax = INTVAL (mmax_rtx);
5054 if (sign)
5056 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5058 mmin >>= (sign_copies - 1);
5059 mmax >>= (sign_copies - 1);
5063 switch (code)
5065 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5066 case GEU:
5067 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5068 return const_true_rtx;
5069 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5070 return const0_rtx;
5071 break;
5072 case GE:
5073 if (val <= mmin)
5074 return const_true_rtx;
5075 if (val > mmax)
5076 return const0_rtx;
5077 break;
5079 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5080 case LEU:
5081 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5082 return const_true_rtx;
5083 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5084 return const0_rtx;
5085 break;
5086 case LE:
5087 if (val >= mmax)
5088 return const_true_rtx;
5089 if (val < mmin)
5090 return const0_rtx;
5091 break;
5093 case EQ:
5094 /* x == y is always false for y out of range. */
5095 if (val < mmin || val > mmax)
5096 return const0_rtx;
5097 break;
5099 /* x > y is always false for y >= mmax, always true for y < mmin. */
5100 case GTU:
5101 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5102 return const0_rtx;
5103 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5104 return const_true_rtx;
5105 break;
5106 case GT:
5107 if (val >= mmax)
5108 return const0_rtx;
5109 if (val < mmin)
5110 return const_true_rtx;
5111 break;
5113 /* x < y is always false for y <= mmin, always true for y > mmax. */
5114 case LTU:
5115 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5116 return const0_rtx;
5117 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5118 return const_true_rtx;
5119 break;
5120 case LT:
5121 if (val <= mmin)
5122 return const0_rtx;
5123 if (val > mmax)
5124 return const_true_rtx;
5125 break;
5127 case NE:
5128 /* x != y is always true for y out of range. */
5129 if (val < mmin || val > mmax)
5130 return const_true_rtx;
5131 break;
5133 default:
5134 break;
5138 /* Optimize integer comparisons with zero. */
5139 if (trueop1 == const0_rtx)
5141 /* Some addresses are known to be nonzero. We don't know
5142 their sign, but equality comparisons are known. */
5143 if (nonzero_address_p (trueop0))
5145 if (code == EQ || code == LEU)
5146 return const0_rtx;
5147 if (code == NE || code == GTU)
5148 return const_true_rtx;
5151 /* See if the first operand is an IOR with a constant. If so, we
5152 may be able to determine the result of this comparison. */
5153 if (GET_CODE (op0) == IOR)
5155 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5156 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5158 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5159 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5160 && (UINTVAL (inner_const)
5161 & ((unsigned HOST_WIDE_INT) 1
5162 << sign_bitnum)));
5164 switch (code)
5166 case EQ:
5167 case LEU:
5168 return const0_rtx;
5169 case NE:
5170 case GTU:
5171 return const_true_rtx;
5172 case LT:
5173 case LE:
5174 if (has_sign)
5175 return const_true_rtx;
5176 break;
5177 case GT:
5178 case GE:
5179 if (has_sign)
5180 return const0_rtx;
5181 break;
5182 default:
5183 break;
5189 /* Optimize comparison of ABS with zero. */
5190 if (trueop1 == CONST0_RTX (mode)
5191 && (GET_CODE (trueop0) == ABS
5192 || (GET_CODE (trueop0) == FLOAT_EXTEND
5193 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5195 switch (code)
5197 case LT:
5198 /* Optimize abs(x) < 0.0. */
5199 if (!HONOR_SNANS (mode)
5200 && (!INTEGRAL_MODE_P (mode)
5201 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5203 if (INTEGRAL_MODE_P (mode)
5204 && (issue_strict_overflow_warning
5205 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5206 warning (OPT_Wstrict_overflow,
5207 ("assuming signed overflow does not occur when "
5208 "assuming abs (x) < 0 is false"));
5209 return const0_rtx;
5211 break;
5213 case GE:
5214 /* Optimize abs(x) >= 0.0. */
5215 if (!HONOR_NANS (mode)
5216 && (!INTEGRAL_MODE_P (mode)
5217 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5219 if (INTEGRAL_MODE_P (mode)
5220 && (issue_strict_overflow_warning
5221 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5222 warning (OPT_Wstrict_overflow,
5223 ("assuming signed overflow does not occur when "
5224 "assuming abs (x) >= 0 is true"));
5225 return const_true_rtx;
5227 break;
5229 case UNGE:
5230 /* Optimize ! (abs(x) < 0.0). */
5231 return const_true_rtx;
5233 default:
5234 break;
5238 return 0;
5241 /* Simplify CODE, an operation with result mode MODE and three operands,
5242 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5243 a constant. Return 0 if no simplification is possible. */
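/* For example, (if_then_else (const_int 1) A B) folds to A, and the FMA
   canonicalization below turns (fma (neg a) (neg b) c) into (fma a b c).  */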
5246 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5247 enum machine_mode op0_mode, rtx op0, rtx op1,
5248 rtx op2)
5250 unsigned int width = GET_MODE_PRECISION (mode);
5251 bool any_change = false;
5252 rtx tem, trueop2;
5254 /* VOIDmode means "infinite" precision. */
5255 if (width == 0)
5256 width = HOST_BITS_PER_WIDE_INT;
5258 switch (code)
5260 case FMA:
5261 /* Simplify negations around the multiplication. */
5262 /* -a * -b + c => a * b + c. */
5263 if (GET_CODE (op0) == NEG)
5265 tem = simplify_unary_operation (NEG, mode, op1, mode);
5266 if (tem)
5267 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5269 else if (GET_CODE (op1) == NEG)
5271 tem = simplify_unary_operation (NEG, mode, op0, mode);
5272 if (tem)
5273 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5276 /* Canonicalize the two multiplication operands. */
5277 /* a * -b + c => -b * a + c. */
5278 if (swap_commutative_operands_p (op0, op1))
5279 tem = op0, op0 = op1, op1 = tem, any_change = true;
5281 if (any_change)
5282 return gen_rtx_FMA (mode, op0, op1, op2);
5283 return NULL_RTX;
5285 case SIGN_EXTRACT:
5286 case ZERO_EXTRACT:
5287 if (CONST_INT_P (op0)
5288 && CONST_INT_P (op1)
5289 && CONST_INT_P (op2)
5290 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5291 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5293 /* Extracting a bit-field from a constant */
5294 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5295 HOST_WIDE_INT op1val = INTVAL (op1);
5296 HOST_WIDE_INT op2val = INTVAL (op2);
5297 if (BITS_BIG_ENDIAN)
5298 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5299 else
5300 val >>= op2val;
5302 if (HOST_BITS_PER_WIDE_INT != op1val)
5304 /* First zero-extend. */
5305 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5306 /* If desired, propagate sign bit. */
5307 if (code == SIGN_EXTRACT
5308 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5309 != 0)
5310 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5313 return gen_int_mode (val, mode);
5315 break;
5317 case IF_THEN_ELSE:
5318 if (CONST_INT_P (op0))
5319 return op0 != const0_rtx ? op1 : op2;
5321 /* Convert c ? a : a into "a". */
5322 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5323 return op1;
5325 /* Convert a != b ? a : b into "a". */
5326 if (GET_CODE (op0) == NE
5327 && ! side_effects_p (op0)
5328 && ! HONOR_NANS (mode)
5329 && ! HONOR_SIGNED_ZEROS (mode)
5330 && ((rtx_equal_p (XEXP (op0, 0), op1)
5331 && rtx_equal_p (XEXP (op0, 1), op2))
5332 || (rtx_equal_p (XEXP (op0, 0), op2)
5333 && rtx_equal_p (XEXP (op0, 1), op1))))
5334 return op1;
5336 /* Convert a == b ? a : b into "b". */
5337 if (GET_CODE (op0) == EQ
5338 && ! side_effects_p (op0)
5339 && ! HONOR_NANS (mode)
5340 && ! HONOR_SIGNED_ZEROS (mode)
5341 && ((rtx_equal_p (XEXP (op0, 0), op1)
5342 && rtx_equal_p (XEXP (op0, 1), op2))
5343 || (rtx_equal_p (XEXP (op0, 0), op2)
5344 && rtx_equal_p (XEXP (op0, 1), op1))))
5345 return op2;
5347 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5349 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5350 ? GET_MODE (XEXP (op0, 1))
5351 : GET_MODE (XEXP (op0, 0)));
5352 rtx temp;
5354 /* Look for happy constants in op1 and op2. */
5355 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5357 HOST_WIDE_INT t = INTVAL (op1);
5358 HOST_WIDE_INT f = INTVAL (op2);
5360 if (t == STORE_FLAG_VALUE && f == 0)
5361 code = GET_CODE (op0);
5362 else if (t == 0 && f == STORE_FLAG_VALUE)
5364 enum rtx_code tmp;
5365 tmp = reversed_comparison_code (op0, NULL_RTX);
5366 if (tmp == UNKNOWN)
5367 break;
5368 code = tmp;
5370 else
5371 break;
5373 return simplify_gen_relational (code, mode, cmp_mode,
5374 XEXP (op0, 0), XEXP (op0, 1));
5377 if (cmp_mode == VOIDmode)
5378 cmp_mode = op0_mode;
5379 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5380 cmp_mode, XEXP (op0, 0),
5381 XEXP (op0, 1));
5383 /* See if any simplifications were possible. */
5384 if (temp)
5386 if (CONST_INT_P (temp))
5387 return temp == const0_rtx ? op2 : op1;
5388 else if (temp)
5389 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5392 break;
5394 case VEC_MERGE:
5395 gcc_assert (GET_MODE (op0) == mode);
5396 gcc_assert (GET_MODE (op1) == mode);
5397 gcc_assert (VECTOR_MODE_P (mode));
5398 trueop2 = avoid_constant_pool_reference (op2);
5399 if (CONST_INT_P (trueop2))
5401 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5402 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5403 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5404 unsigned HOST_WIDE_INT mask;
5405 if (n_elts == HOST_BITS_PER_WIDE_INT)
5406 mask = -1;
5407 else
5408 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5410 if (!(sel & mask) && !side_effects_p (op0))
5411 return op1;
5412 if ((sel & mask) == mask && !side_effects_p (op1))
5413 return op0;
5415 rtx trueop0 = avoid_constant_pool_reference (op0);
5416 rtx trueop1 = avoid_constant_pool_reference (op1);
5417 if (GET_CODE (trueop0) == CONST_VECTOR
5418 && GET_CODE (trueop1) == CONST_VECTOR)
5420 rtvec v = rtvec_alloc (n_elts);
5421 unsigned int i;
5423 for (i = 0; i < n_elts; i++)
5424 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5425 ? CONST_VECTOR_ELT (trueop0, i)
5426 : CONST_VECTOR_ELT (trueop1, i));
5427 return gen_rtx_CONST_VECTOR (mode, v);
5430 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5431 if no element from a appears in the result. */
5432 if (GET_CODE (op0) == VEC_MERGE)
5434 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5435 if (CONST_INT_P (tem))
5437 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5438 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5439 return simplify_gen_ternary (code, mode, mode,
5440 XEXP (op0, 1), op1, op2);
5441 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5442 return simplify_gen_ternary (code, mode, mode,
5443 XEXP (op0, 0), op1, op2);
5446 if (GET_CODE (op1) == VEC_MERGE)
5448 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5449 if (CONST_INT_P (tem))
5451 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5452 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5453 return simplify_gen_ternary (code, mode, mode,
5454 op0, XEXP (op1, 1), op2);
5455 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5456 return simplify_gen_ternary (code, mode, mode,
5457 op0, XEXP (op1, 0), op2);
5462 if (rtx_equal_p (op0, op1)
5463 && !side_effects_p (op2) && !side_effects_p (op1))
5464 return op0;
5466 break;
5468 default:
5469 gcc_unreachable ();
5472 return 0;
5475 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5476 or CONST_VECTOR,
5477 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5479 Works by unpacking OP into a collection of 8-bit values
5480 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5481 and then repacking them again for OUTERMODE. */
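/* For example (illustrative only), on a little-endian target
   simplify_immed_subreg (QImode, GEN_INT (0x1234), HImode, 0)
   unpacks the HImode constant into bytes and repacks byte 0,
   yielding (const_int 0x34).  */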
5483 static rtx
5484 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5485 enum machine_mode innermode, unsigned int byte)
5487 /* We support up to 512-bit values (for V8DFmode). */
5488 enum {
5489 max_bitsize = 512,
5490 value_bit = 8,
5491 value_mask = (1 << value_bit) - 1
5493 unsigned char value[max_bitsize / value_bit];
5494 int value_start;
5495 int i;
5496 int elem;
5498 int num_elem;
5499 rtx * elems;
5500 int elem_bitsize;
5501 rtx result_s;
5502 rtvec result_v = NULL;
5503 enum mode_class outer_class;
5504 enum machine_mode outer_submode;
5506 /* Some ports misuse CCmode. */
5507 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5508 return op;
5510 /* We have no way to represent a complex constant at the rtl level. */
5511 if (COMPLEX_MODE_P (outermode))
5512 return NULL_RTX;
5514 /* Unpack the value. */
5516 if (GET_CODE (op) == CONST_VECTOR)
5518 num_elem = CONST_VECTOR_NUNITS (op);
5519 elems = &CONST_VECTOR_ELT (op, 0);
5520 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5522 else
5524 num_elem = 1;
5525 elems = &op;
5526 elem_bitsize = max_bitsize;
5528 /* If this asserts, it is too complicated; reducing value_bit may help. */
5529 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5530 /* I don't know how to handle endianness of sub-units. */
5531 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5533 for (elem = 0; elem < num_elem; elem++)
5535 unsigned char * vp;
5536 rtx el = elems[elem];
5538 /* Vectors are kept in target memory order. (This is probably
5539 a mistake.) */
5541 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5542 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5543 / BITS_PER_UNIT);
5544 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5545 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5546 unsigned bytele = (subword_byte % UNITS_PER_WORD
5547 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5548 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5551 switch (GET_CODE (el))
5553 case CONST_INT:
5554 for (i = 0;
5555 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5556 i += value_bit)
5557 *vp++ = INTVAL (el) >> i;
5558 /* CONST_INTs are always logically sign-extended. */
5559 for (; i < elem_bitsize; i += value_bit)
5560 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5561 break;
5563 case CONST_DOUBLE:
5564 if (GET_MODE (el) == VOIDmode)
5566 unsigned char extend = 0;
5567 /* If this triggers, someone should have generated a
5568 CONST_INT instead. */
5569 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5571 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5572 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5573 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5575 *vp++
5576 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5577 i += value_bit;
5580 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5581 extend = -1;
5582 for (; i < elem_bitsize; i += value_bit)
5583 *vp++ = extend;
5585 else
5587 long tmp[max_bitsize / 32];
5588 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5590 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5591 gcc_assert (bitsize <= elem_bitsize);
5592 gcc_assert (bitsize % value_bit == 0);
5594 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5595 GET_MODE (el));
5597 /* real_to_target produces its result in words affected by
5598 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5599 and use WORDS_BIG_ENDIAN instead; see the documentation
5600 of SUBREG in rtl.texi. */
5601 for (i = 0; i < bitsize; i += value_bit)
5603 int ibase;
5604 if (WORDS_BIG_ENDIAN)
5605 ibase = bitsize - 1 - i;
5606 else
5607 ibase = i;
5608 *vp++ = tmp[ibase / 32] >> i % 32;
5611 /* It shouldn't matter what's done here, so fill it with
5612 zero. */
5613 for (; i < elem_bitsize; i += value_bit)
5614 *vp++ = 0;
5616 break;
5618 case CONST_FIXED:
5619 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5621 for (i = 0; i < elem_bitsize; i += value_bit)
5622 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5624 else
5626 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5627 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5628 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5629 i += value_bit)
5630 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5631 >> (i - HOST_BITS_PER_WIDE_INT);
5632 for (; i < elem_bitsize; i += value_bit)
5633 *vp++ = 0;
5635 break;
5637 default:
5638 gcc_unreachable ();
5642 /* Now, pick the right byte to start with. */
5643 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5644 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5645 will already have offset 0. */
5646 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5648 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5649 - byte);
5650 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5651 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5652 byte = (subword_byte % UNITS_PER_WORD
5653 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5656 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5657 so if it's become negative it will instead be very large.) */
5658 gcc_assert (byte < GET_MODE_SIZE (innermode));
5660 /* Convert from bytes to chunks of size value_bit. */
5661 value_start = byte * (BITS_PER_UNIT / value_bit);
5663 /* Re-pack the value. */
5665 if (VECTOR_MODE_P (outermode))
5667 num_elem = GET_MODE_NUNITS (outermode);
5668 result_v = rtvec_alloc (num_elem);
5669 elems = &RTVEC_ELT (result_v, 0);
5670 outer_submode = GET_MODE_INNER (outermode);
5672 else
5674 num_elem = 1;
5675 elems = &result_s;
5676 outer_submode = outermode;
5679 outer_class = GET_MODE_CLASS (outer_submode);
5680 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5682 gcc_assert (elem_bitsize % value_bit == 0);
5683 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5685 for (elem = 0; elem < num_elem; elem++)
5687 unsigned char *vp;
5689 /* Vectors are stored in target memory order. (This is probably
5690 a mistake.) */
5692 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5693 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5694 / BITS_PER_UNIT);
5695 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5696 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5697 unsigned bytele = (subword_byte % UNITS_PER_WORD
5698 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5699 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5702 switch (outer_class)
5704 case MODE_INT:
5705 case MODE_PARTIAL_INT:
5707 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5709 for (i = 0;
5710 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5711 i += value_bit)
5712 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5713 for (; i < elem_bitsize; i += value_bit)
5714 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5715 << (i - HOST_BITS_PER_WIDE_INT);
5717 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5718 know why. */
5719 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5720 elems[elem] = gen_int_mode (lo, outer_submode);
5721 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5722 elems[elem] = immed_double_const (lo, hi, outer_submode);
5723 else
5724 return NULL_RTX;
5726 break;
5728 case MODE_FLOAT:
5729 case MODE_DECIMAL_FLOAT:
5731 REAL_VALUE_TYPE r;
5732 long tmp[max_bitsize / 32];
5734 /* real_from_target wants its input in words affected by
5735 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5736 and use WORDS_BIG_ENDIAN instead; see the documentation
5737 of SUBREG in rtl.texi. */
5738 for (i = 0; i < max_bitsize / 32; i++)
5739 tmp[i] = 0;
5740 for (i = 0; i < elem_bitsize; i += value_bit)
5742 int ibase;
5743 if (WORDS_BIG_ENDIAN)
5744 ibase = elem_bitsize - 1 - i;
5745 else
5746 ibase = i;
5747 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5750 real_from_target (&r, tmp, outer_submode);
5751 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5753 break;
5755 case MODE_FRACT:
5756 case MODE_UFRACT:
5757 case MODE_ACCUM:
5758 case MODE_UACCUM:
5760 FIXED_VALUE_TYPE f;
5761 f.data.low = 0;
5762 f.data.high = 0;
5763 f.mode = outer_submode;
5765 for (i = 0;
5766 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5767 i += value_bit)
5768 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5769 for (; i < elem_bitsize; i += value_bit)
5770 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5771 << (i - HOST_BITS_PER_WIDE_INT));
5773 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5775 break;
5777 default:
5778 gcc_unreachable ();
5781 if (VECTOR_MODE_P (outermode))
5782 return gen_rtx_CONST_VECTOR (outermode, result_v);
5783 else
5784 return result_s;
5787 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5788 Return 0 if no simplifications are possible. */
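/* For example (illustrative only), a SUBREG of a SUBREG collapses to a
   single SUBREG: (subreg:QI (subreg:HI (reg:SI x) 0) 0) typically
   becomes (subreg:QI (reg:SI x) 0).  */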
5790 simplify_subreg (enum machine_mode outermode, rtx op,
5791 enum machine_mode innermode, unsigned int byte)
5793 /* Little bit of sanity checking. */
5794 gcc_assert (innermode != VOIDmode);
5795 gcc_assert (outermode != VOIDmode);
5796 gcc_assert (innermode != BLKmode);
5797 gcc_assert (outermode != BLKmode);
5799 gcc_assert (GET_MODE (op) == innermode
5800 || GET_MODE (op) == VOIDmode);
5802 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5803 return NULL_RTX;
5805 if (byte >= GET_MODE_SIZE (innermode))
5806 return NULL_RTX;
5808 if (outermode == innermode && !byte)
5809 return op;
5811 if (CONST_SCALAR_INT_P (op)
5812 || CONST_DOUBLE_AS_FLOAT_P (op)
5813 || GET_CODE (op) == CONST_FIXED
5814 || GET_CODE (op) == CONST_VECTOR)
5815 return simplify_immed_subreg (outermode, op, innermode, byte);
5817 /* Changing mode twice with SUBREG => just change it once,
5818 or not at all if changing back to op's starting mode. */
5819 if (GET_CODE (op) == SUBREG)
5821 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5822 int final_offset = byte + SUBREG_BYTE (op);
5823 rtx newx;
5825 if (outermode == innermostmode
5826 && byte == 0 && SUBREG_BYTE (op) == 0)
5827 return SUBREG_REG (op);
5829 /* The SUBREG_BYTE represents the offset, as if the value were stored
5830 in memory. The irritating exception is a paradoxical subreg, where
5831 we define SUBREG_BYTE to be 0. On big-endian machines, this
5832 value should be negative. For a moment, undo this exception. */
5833 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5835 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5836 if (WORDS_BIG_ENDIAN)
5837 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5838 if (BYTES_BIG_ENDIAN)
5839 final_offset += difference % UNITS_PER_WORD;
5841 if (SUBREG_BYTE (op) == 0
5842 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5844 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5845 if (WORDS_BIG_ENDIAN)
5846 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5847 if (BYTES_BIG_ENDIAN)
5848 final_offset += difference % UNITS_PER_WORD;
5851 /* See whether resulting subreg will be paradoxical. */
5852 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5854 /* In nonparadoxical subregs we can't handle negative offsets. */
5855 if (final_offset < 0)
5856 return NULL_RTX;
5857 /* Bail out in case the resulting subreg would be incorrect. */
5858 if (final_offset % GET_MODE_SIZE (outermode)
5859 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5860 return NULL_RTX;
5862 else
5864 int offset = 0;
5865 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5867 /* In a paradoxical subreg, see if we are still looking at the lower part.
5868 If so, our SUBREG_BYTE will be 0. */
5869 if (WORDS_BIG_ENDIAN)
5870 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5871 if (BYTES_BIG_ENDIAN)
5872 offset += difference % UNITS_PER_WORD;
5873 if (offset == final_offset)
5874 final_offset = 0;
5875 else
5876 return NULL_RTX;
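      /* Worked example, added for illustration (not in the original
         source): when no paradoxical subreg is involved the two byte
         offsets simply add, so on a little-endian target
           (subreg:QI (subreg:HI (reg:SI r) 2) 1)
         gives final_offset = 1 + 2 = 3 and the recursion below yields
           (subreg:QI (reg:SI r) 3),
         i.e. the most significant byte of R.  */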
5879 /* Recurse for further possible simplifications. */
5880 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5881 final_offset);
5882 if (newx)
5883 return newx;
5884 if (validate_subreg (outermode, innermostmode,
5885 SUBREG_REG (op), final_offset))
5887 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5888 if (SUBREG_PROMOTED_VAR_P (op)
5889 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5890 && GET_MODE_CLASS (outermode) == MODE_INT
5891 && IN_RANGE (GET_MODE_SIZE (outermode),
5892 GET_MODE_SIZE (innermode),
5893 GET_MODE_SIZE (innermostmode))
5894 && subreg_lowpart_p (newx))
5896 SUBREG_PROMOTED_VAR_P (newx) = 1;
5897 SUBREG_PROMOTED_UNSIGNED_SET
5898 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5900 return newx;
5902 return NULL_RTX;
5905 /* SUBREG of a hard register => just change the register number
5906 and/or mode. If the hard register is not valid in that mode,
5907 suppress this simplification. If the hard register is the stack,
5908 frame, or argument pointer, leave this as a SUBREG. */
5910 if (REG_P (op) && HARD_REGISTER_P (op))
5912 unsigned int regno, final_regno;
5914 regno = REGNO (op);
5915 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5916 if (HARD_REGISTER_NUM_P (final_regno))
5918 rtx x;
5919 int final_offset = byte;
5921 /* Adjust offset for paradoxical subregs. */
5922 if (byte == 0
5923 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5925 int difference = (GET_MODE_SIZE (innermode)
5926 - GET_MODE_SIZE (outermode));
5927 if (WORDS_BIG_ENDIAN)
5928 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5929 if (BYTES_BIG_ENDIAN)
5930 final_offset += difference % UNITS_PER_WORD;
5933 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5935 /* Propagate the original regno.  We don't have any way to specify
5936 the offset inside the original regno, so do so only for the lowpart.
5937 The information is used only by alias analysis, which cannot
5938 grok partial registers anyway. */
5940 if (subreg_lowpart_offset (outermode, innermode) == byte)
5941 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5942 return x;
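      /* Illustration, added (not in the original source): on a
         hypothetical little-endian target whose hard registers are four
         bytes wide and where a DImode value occupies the consecutive
         pair starting at hard register 10, simplify_subreg_regno maps
           (subreg:SI (reg:DI 10) 4)
         to register 11, so the code above returns (reg:SI 11).  */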
5946 /* If we have a SUBREG of a register that we are replacing and we are
5947 replacing it with a MEM, make a new MEM and try replacing the
5948 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5949 or if we would be widening it. */
5951 if (MEM_P (op)
5952 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5953 /* Allow splitting of volatile memory references in case we don't
5954 have an instruction to move the whole thing. */
5955 && (! MEM_VOLATILE_P (op)
5956 || ! have_insn_for (SET, innermode))
5957 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5958 return adjust_address_nv (op, outermode, byte);
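  /* Illustration, added (not in the original source): for a non-volatile
     memory reference whose address is not mode-dependent,
       (subreg:QI (mem:SI (reg:SI p)) 3)
     is rewritten by adjust_address_nv into
       (mem:QI (plus:SI (reg:SI p) (const_int 3))),
     assuming SImode pointers on the hypothetical target.  */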
5960 /* Handle complex values represented as CONCAT
5961 of real and imaginary part. */
5962 if (GET_CODE (op) == CONCAT)
5964 unsigned int part_size, final_offset;
5965 rtx part, res;
5967 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5968 if (byte < part_size)
5970 part = XEXP (op, 0);
5971 final_offset = byte;
5973 else
5975 part = XEXP (op, 1);
5976 final_offset = byte - part_size;
5979 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5980 return NULL_RTX;
5982 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5983 if (res)
5984 return res;
5985 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5986 return gen_rtx_SUBREG (outermode, part, final_offset);
5987 return NULL_RTX;
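  /* Illustration, added: for a complex value (concat:SC re:SF im:SF),
     where SFmode is four bytes wide, a subreg at byte 4 selects the
     imaginary part, so (subreg:SF (concat:SC re im) 4) simplifies to IM
     through the recursive call above.  */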
5990 /* A SUBREG resulting from a zero extension may fold to zero if
5991 it extracts higher bits than the ZERO_EXTEND's source provides. */
5992 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5994 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5995 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5996 return CONST0_RTX (outermode);
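  /* Illustration, added: on a little-endian target
       (subreg:SI (zero_extend:DI (reg:SI x)) 4)
     reads bits 32..63 of the zero extension, which are all zero, so the
     whole subreg folds to (const_int 0).  */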
5999 if (SCALAR_INT_MODE_P (outermode)
6000 && SCALAR_INT_MODE_P (innermode)
6001 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6002 && byte == subreg_lowpart_offset (outermode, innermode))
6004 rtx tem = simplify_truncation (outermode, op, innermode);
6005 if (tem)
6006 return tem;
6009 return NULL_RTX;
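/* A minimal usage sketch, added for illustration (the helper name and the
   little-endian assumption are mine, not from the original file): asking
   simplify_subreg for the low SImode half of a DImode constant exercises
   the simplify_immed_subreg path above.  */

static rtx
sketch_low_half_of_const (rtx val)
{
  /* VAL is assumed to be a CONST_INT holding a DImode value; on a
     little-endian target the lowpart lives at byte 0, so for
     VAL == (const_int 0x112345678) the expected result is
     (const_int 0x12345678).  */
  return simplify_subreg (SImode, val, DImode, 0);
}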
6012 /* Make a SUBREG operation or equivalent if it folds. */
6014 rtx
6015 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6016 enum machine_mode innermode, unsigned int byte)
6018 rtx newx;
6020 newx = simplify_subreg (outermode, op, innermode, byte);
6021 if (newx)
6022 return newx;
6024 if (GET_CODE (op) == SUBREG
6025 || GET_CODE (op) == CONCAT
6026 || GET_MODE (op) == VOIDmode)
6027 return NULL_RTX;
6029 if (validate_subreg (outermode, innermode, op, byte))
6030 return gen_rtx_SUBREG (outermode, op, byte);
6032 return NULL_RTX;
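/* A minimal usage sketch, added for illustration (the helper name is
   mine): callers that need the OUTERMODE view of a value typically go
   through simplify_gen_subreg rather than building a SUBREG by hand, so
   constants, hard registers and nested subregs are folded when possible
   and a plain (subreg ...) is generated only as a fallback.  */

static rtx
sketch_lowpart_view (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode)
{
  /* subreg_lowpart_offset picks the byte offset of the lowpart for the
     target's endianness, as in the hard-register case of simplify_subreg
     above.  The result may be NULL_RTX if no valid subreg can be made.  */
  unsigned int byte = subreg_lowpart_offset (outermode, innermode);
  return simplify_gen_subreg (outermode, op, innermode, byte);
}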
6035 /* Simplify X, an rtx expression.
6037 Return the simplified expression or NULL if no simplifications
6038 were possible.
6040 This is the preferred entry point into the simplification routines;
6041 however, we still allow passes to call the more specific routines.
6043 Right now GCC has three (yes, three) major bodies of RTL simplification
6044 code that need to be unified.
6046 1. fold_rtx in cse.c. This code uses various CSE specific
6047 information to aid in RTL simplification.
6049 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6050 it uses combine specific information to aid in RTL
6051 simplification.
6053 3. The routines in this file.
6056 Long term we want to only have one body of simplification code; to
6057 get to that state I recommend the following steps:
6059 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6060 that do not depend on pass-specific state into these routines.
6062 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6063 use this routine whenever possible.
6065 3. Allow for pass dependent state to be provided to these
6066 routines and add simplifications based on the pass dependent
6067 state. Remove code from cse.c & combine.c that becomes
6068 redundant/dead.
6070 It will take time, but ultimately the compiler will be easier to
6071 maintain and improve.  It's totally silly that when we add a
6072 simplification it needs to be added in 4 places (3 for RTL
6073 simplification and 1 for tree simplification). */
6075 rtx
6076 simplify_rtx (const_rtx x)
6078 const enum rtx_code code = GET_CODE (x);
6079 const enum machine_mode mode = GET_MODE (x);
6081 switch (GET_RTX_CLASS (code))
6083 case RTX_UNARY:
6084 return simplify_unary_operation (code, mode,
6085 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6086 case RTX_COMM_ARITH:
6087 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6088 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6090 /* Fall through.... */
6092 case RTX_BIN_ARITH:
6093 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6095 case RTX_TERNARY:
6096 case RTX_BITFIELD_OPS:
6097 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6098 XEXP (x, 0), XEXP (x, 1),
6099 XEXP (x, 2));
6101 case RTX_COMPARE:
6102 case RTX_COMM_COMPARE:
6103 return simplify_relational_operation (code, mode,
6104 ((GET_MODE (XEXP (x, 0))
6105 != VOIDmode)
6106 ? GET_MODE (XEXP (x, 0))
6107 : GET_MODE (XEXP (x, 1))),
6108 XEXP (x, 0),
6109 XEXP (x, 1));
6111 case RTX_EXTRA:
6112 if (code == SUBREG)
6113 return simplify_subreg (mode, SUBREG_REG (x),
6114 GET_MODE (SUBREG_REG (x)),
6115 SUBREG_BYTE (x));
6116 break;
6118 case RTX_OBJ:
6119 if (code == LO_SUM)
6121 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6122 if (GET_CODE (XEXP (x, 0)) == HIGH
6123 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6124 return XEXP (x, 1);
6126 break;
6128 default:
6129 break;
6131 return NULL;
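/* Usage sketch, added for illustration (not part of the original file;
   the helper name, SImode and the constant 4 are arbitrary choices):
   simplify_rtx canonicalizes commutative operations, so a caller holding
   a non-canonical (plus (const_int ...) (reg ...)) can hand it to this
   entry point and fall back to the original rtx when nothing changes.  */

static rtx
sketch_canonicalize (rtx reg_op)
{
  rtx x = gen_rtx_PLUS (SImode, GEN_INT (4), reg_op);
  rtx tem = simplify_rtx (x);

  /* swap_commutative_operands_p prefers the constant last, so TEM is
     expected to be (plus:SI reg_op (const_int 4)).  */
  return tem ? tem : x;
}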