1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
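/* For example (illustrative only, assuming a 64-bit HOST_WIDE_INT):

     HWI_SIGN_EXTEND (5)   == (HOST_WIDE_INT) 0     high half for a positive low
     HWI_SIGN_EXTEND (-5)  == (HOST_WIDE_INT) -1    high half for a negative low

   so storing the result in the high word makes the (low, high) pair denote
   the same number that LOW denotes as a signed wide int.  */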
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
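/* A worked example (illustrative only): in HImode the most negative value
   is -32768 (0x8000).  Its mathematical negation, +32768, does not fit in
   HImode, so gen_int_mode truncates it and neg_const_int (HImode, x) for
   x == (const_int -32768) yields (const_int -32768) again.  */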
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
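/* A worked example (illustrative only, assuming 32-bit SImode on a 64-bit
   host): the SImode sign-bit constant is 1 << 31, so

     mode_signbit_p (SImode, gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode))

   returns true, whereas constants such as 0x80000001 or 0x40000000 are
   rejected.  */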
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
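/* A minimal usage sketch (illustrative only; register number 100 simply
   stands for some SImode pseudo the caller already has):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);

   PLUS is commutative, so the operands are reordered and SUM is
   (plus:SI (reg:SI 100) (const_int 4)); had both operands been constants,
   the call would have folded them instead of building a new rtx.  */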
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
252 else
253 return c;
256 return x;
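/* For example (illustrative only): if X is (mem:SF (symbol_ref ...)) and the
   symbol_ref addresses a constant-pool entry holding the SFmode value 1.5,
   the function returns the CONST_DOUBLE for 1.5 instead of the MEM, so the
   caller can fold it like any other constant operand.  */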
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
309 break;
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
321 rtx newx;
323 offset += MEM_OFFSET (x);
325 newx = DECL_RTL (decl);
327 if (MEM_P (newx))
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do so if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
356 return x;
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
366 rtx tem;
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
372 return gen_rtx_fmt_e (code, mode, op);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 rtx tem;
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call it on each subexpression
409 X; if it returns non-NULL, replace X with its return value and simplify the
410 result. */
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
424 if (__builtin_expect (fn != NULL, 0))
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
433 switch (GET_RTX_CLASS (code))
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 case RTX_EXTRA:
476 if (code == SUBREG)
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
486 break;
488 case RTX_OBJ:
489 if (code == MEM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
496 else if (code == LO_SUM)
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
509 break;
511 default:
512 break;
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
529 if (newvec == vec)
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
536 RTVEC_ELT (newvec, j) = op;
539 break;
541 case 'e':
542 if (XEXP (x, i))
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
552 break;
554 return newx;
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
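/* A minimal usage sketch (illustrative only; OLD is assumed to be some
   (reg:SI ...) that occurs in the expression):

     rtx x = gen_rtx_PLUS (SImode, old, GEN_INT (4));
     rtx y = simplify_replace_rtx (x, old, GEN_INT (8));

   Every occurrence of OLD is replaced by (const_int 8) and the result is
   re-simplified, so Y is (const_int 12) rather than a PLUS of two
   constants.  */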
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
592 should be used.
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
600 (and:DI X Y)
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
630 mode. */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
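/* A worked example of the rule just above (illustrative only): an SImode
   truncation of (plus:DI (reg:DI a) (reg:DI b)) becomes
   (plus:SI (truncate:SI (reg:DI a)) (truncate:SI (reg:DI b))).  Each inner
   truncation goes through simplify_gen_unary (TRUNCATE, ...), so it may in
   turn be represented as a lowpart subreg where the target allows that.  */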
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
777 return NULL_RTX;
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
787 rtx trueop, tem;
789 trueop = avoid_constant_pool_reference (op);
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
795 return simplify_unary_operation_1 (code, mode, op);
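/* For example (illustrative only), with a constant operand the call folds
   immediately via simplify_const_unary_operation:

     simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)

   yields (const_int -5), while non-constant operands fall through to the
   pattern-based rules in simplify_unary_operation_1 below.  */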
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
803 enum rtx_code reversed;
804 rtx temp;
806 switch (code)
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
813 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
814 comparison is all ones. */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
861 if (STORE_FLAG_VALUE == -1
862 && GET_CODE (op) == ASHIFTRT
863 && CONST_INT_P (XEXP (op, 1))
864 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
865 return simplify_gen_relational (GE, mode, VOIDmode,
866 XEXP (op, 0), const0_rtx);
869 if (GET_CODE (op) == SUBREG
870 && subreg_lowpart_p (op)
871 && (GET_MODE_SIZE (GET_MODE (op))
872 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
873 && GET_CODE (SUBREG_REG (op)) == ASHIFT
874 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
876 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
877 rtx x;
879 x = gen_rtx_ROTATE (inner_mode,
880 simplify_gen_unary (NOT, inner_mode, const1_rtx,
881 inner_mode),
882 XEXP (SUBREG_REG (op), 1));
883 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
884 if (temp)
885 return temp;
888 /* Apply De Morgan's laws to reduce number of patterns for machines
889 with negating logical insns (and-not, nand, etc.). If result has
890 only one NOT, put it first, since that is how the patterns are
891 coded. */
892 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
894 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
895 enum machine_mode op_mode;
897 op_mode = GET_MODE (in1);
898 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
900 op_mode = GET_MODE (in2);
901 if (op_mode == VOIDmode)
902 op_mode = mode;
903 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
905 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
907 rtx tem = in2;
908 in2 = in1; in1 = tem;
911 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
912 mode, in1, in2);
915 /* (not (bswap x)) -> (bswap (not x)). */
916 if (GET_CODE (op) == BSWAP)
918 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
919 return simplify_gen_unary (BSWAP, mode, x, mode);
921 break;
923 case NEG:
924 /* (neg (neg X)) == X. */
925 if (GET_CODE (op) == NEG)
926 return XEXP (op, 0);
928 /* (neg (plus X 1)) can become (not X). */
929 if (GET_CODE (op) == PLUS
930 && XEXP (op, 1) == const1_rtx)
931 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
933 /* Similarly, (neg (not X)) is (plus X 1). */
934 if (GET_CODE (op) == NOT)
935 return plus_constant (mode, XEXP (op, 0), 1);
937 /* (neg (minus X Y)) can become (minus Y X). This transformation
938 isn't safe for modes with signed zeros, since if X and Y are
939 both +0, (minus Y X) is the same as (minus X Y). If the
940 rounding mode is towards +infinity (or -infinity) then the two
941 expressions will be rounded differently. */
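/* Concretely (illustrative only): with X = Y = +0.0,
   (neg (minus X Y)) is -0.0 but (minus Y X) is +0.0, so the two forms
   differ in the sign of zero; hence the HONOR_SIGNED_ZEROS check below.  */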
942 if (GET_CODE (op) == MINUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
947 if (GET_CODE (op) == PLUS
948 && !HONOR_SIGNED_ZEROS (mode)
949 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
951 /* (neg (plus A C)) is simplified to (minus -C A). */
952 if (CONST_SCALAR_INT_P (XEXP (op, 1))
953 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
955 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
956 if (temp)
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
960 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
961 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
962 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
965 /* (neg (mult A B)) becomes (mult A (neg B)).
966 This works even for floating-point values. */
967 if (GET_CODE (op) == MULT
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
971 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
974 /* NEG commutes with ASHIFT since it is multiplication. Only do
975 this if we can then eliminate the NEG (e.g., if the operand
976 is a constant). */
977 if (GET_CODE (op) == ASHIFT)
979 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
980 if (temp)
981 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
984 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
985 C is equal to the width of MODE minus 1. */
986 if (GET_CODE (op) == ASHIFTRT
987 && CONST_INT_P (XEXP (op, 1))
988 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
989 return simplify_gen_binary (LSHIFTRT, mode,
990 XEXP (op, 0), XEXP (op, 1));
992 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
993 C is equal to the width of MODE minus 1. */
994 if (GET_CODE (op) == LSHIFTRT
995 && CONST_INT_P (XEXP (op, 1))
996 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 return simplify_gen_binary (ASHIFTRT, mode,
998 XEXP (op, 0), XEXP (op, 1));
1000 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1001 if (GET_CODE (op) == XOR
1002 && XEXP (op, 1) == const1_rtx
1003 && nonzero_bits (XEXP (op, 0), mode) == 1)
1004 return plus_constant (mode, XEXP (op, 0), -1);
1006 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1007 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1008 if (GET_CODE (op) == LT
1009 && XEXP (op, 1) == const0_rtx
1010 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1012 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1013 int isize = GET_MODE_PRECISION (inner);
1014 if (STORE_FLAG_VALUE == 1)
1016 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1017 GEN_INT (isize - 1));
1018 if (mode == inner)
1019 return temp;
1020 if (GET_MODE_PRECISION (mode) > isize)
1021 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1022 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1024 else if (STORE_FLAG_VALUE == -1)
1026 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1027 GEN_INT (isize - 1));
1028 if (mode == inner)
1029 return temp;
1030 if (GET_MODE_PRECISION (mode) > isize)
1031 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1032 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1035 break;
1037 case TRUNCATE:
1038 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1039 with the umulXi3_highpart patterns. */
1040 if (GET_CODE (op) == LSHIFTRT
1041 && GET_CODE (XEXP (op, 0)) == MULT)
1042 break;
1044 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1046 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1048 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1049 if (temp)
1050 return temp;
1052 /* We can't handle truncation to a partial integer mode here
1053 because we don't know the real bitsize of the partial
1054 integer mode. */
1055 break;
1058 if (GET_MODE (op) != VOIDmode)
1060 temp = simplify_truncation (mode, op, GET_MODE (op));
1061 if (temp)
1062 return temp;
1065 /* If we know that the value is already truncated, we can
1066 replace the TRUNCATE with a SUBREG. */
1067 if (GET_MODE_NUNITS (mode) == 1
1068 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1069 || truncated_to_mode (mode, op)))
1071 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 if (temp)
1073 return temp;
1076 /* A truncate of a comparison can be replaced with a subreg if
1077 STORE_FLAG_VALUE permits. This is like the previous test,
1078 but it works even if the comparison is done in a mode larger
1079 than HOST_BITS_PER_WIDE_INT. */
1080 if (HWI_COMPUTABLE_MODE_P (mode)
1081 && COMPARISON_P (op)
1082 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1084 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1085 if (temp)
1086 return temp;
1089 /* A truncate of a memory is just loading the low part of the memory
1090 if we are not changing the meaning of the address. */
1091 if (GET_CODE (op) == MEM
1092 && !VECTOR_MODE_P (mode)
1093 && !MEM_VOLATILE_P (op)
1094 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 break;
1103 case FLOAT_TRUNCATE:
1104 if (DECIMAL_FLOAT_MODE_P (mode))
1105 break;
1107 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1108 if (GET_CODE (op) == FLOAT_EXTEND
1109 && GET_MODE (XEXP (op, 0)) == mode)
1110 return XEXP (op, 0);
1112 /* (float_truncate:SF (float_truncate:DF foo:XF))
1113 = (float_truncate:SF foo:XF).
1114 This may eliminate double rounding, so it is unsafe.
1116 (float_truncate:SF (float_extend:XF foo:DF))
1117 = (float_truncate:SF foo:DF).
1119 (float_truncate:DF (float_extend:XF foo:SF))
1120 = (float_extend:DF foo:SF). */
1121 if ((GET_CODE (op) == FLOAT_TRUNCATE
1122 && flag_unsafe_math_optimizations)
1123 || GET_CODE (op) == FLOAT_EXTEND)
1124 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
1126 > GET_MODE_SIZE (mode)
1127 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1128 mode,
1129 XEXP (op, 0), mode);
1131 /* (float_truncate (float x)) is (float x) */
1132 if (GET_CODE (op) == FLOAT
1133 && (flag_unsafe_math_optimizations
1134 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1135 && ((unsigned)significand_size (GET_MODE (op))
1136 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1137 - num_sign_bit_copies (XEXP (op, 0),
1138 GET_MODE (XEXP (op, 0))))))))
1139 return simplify_gen_unary (FLOAT, mode,
1140 XEXP (op, 0),
1141 GET_MODE (XEXP (op, 0)));
1143 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1144 (OP:SF foo:SF) if OP is NEG or ABS. */
1145 if ((GET_CODE (op) == ABS
1146 || GET_CODE (op) == NEG)
1147 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1149 return simplify_gen_unary (GET_CODE (op), mode,
1150 XEXP (XEXP (op, 0), 0), mode);
1152 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 is (float_truncate:SF x). */
1154 if (GET_CODE (op) == SUBREG
1155 && subreg_lowpart_p (op)
1156 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1157 return SUBREG_REG (op);
1158 break;
1160 case FLOAT_EXTEND:
1161 if (DECIMAL_FLOAT_MODE_P (mode))
1162 break;
1164 /* (float_extend (float_extend x)) is (float_extend x)
1166 (float_extend (float x)) is (float x) assuming that double
1167 rounding can't happen. */
1169 if (GET_CODE (op) == FLOAT_EXTEND
1170 || (GET_CODE (op) == FLOAT
1171 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1172 && ((unsigned)significand_size (GET_MODE (op))
1173 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1174 - num_sign_bit_copies (XEXP (op, 0),
1175 GET_MODE (XEXP (op, 0)))))))
1176 return simplify_gen_unary (GET_CODE (op), mode,
1177 XEXP (op, 0),
1178 GET_MODE (XEXP (op, 0)));
1180 break;
1182 case ABS:
1183 /* (abs (neg <foo>)) -> (abs <foo>) */
1184 if (GET_CODE (op) == NEG)
1185 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1186 GET_MODE (XEXP (op, 0)));
1188 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1189 do nothing. */
1190 if (GET_MODE (op) == VOIDmode)
1191 break;
1193 /* If operand is something known to be positive, ignore the ABS. */
1194 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1195 || val_signbit_known_clear_p (GET_MODE (op),
1196 nonzero_bits (op, GET_MODE (op))))
1197 return op;
1199 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1200 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1201 return gen_rtx_NEG (mode, op);
1203 break;
1205 case FFS:
1206 /* (ffs (*_extend <X>)) = (ffs <X>) */
1207 if (GET_CODE (op) == SIGN_EXTEND
1208 || GET_CODE (op) == ZERO_EXTEND)
1209 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1210 GET_MODE (XEXP (op, 0)));
1211 break;
1213 case POPCOUNT:
1214 switch (GET_CODE (op))
1216 case BSWAP:
1217 case ZERO_EXTEND:
1218 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1219 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1222 case ROTATE:
1223 case ROTATERT:
1224 /* Rotations don't affect popcount. */
1225 if (!side_effects_p (XEXP (op, 1)))
1226 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1227 GET_MODE (XEXP (op, 0)));
1228 break;
1230 default:
1231 break;
1233 break;
1235 case PARITY:
1236 switch (GET_CODE (op))
1238 case NOT:
1239 case BSWAP:
1240 case ZERO_EXTEND:
1241 case SIGN_EXTEND:
1242 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1245 case ROTATE:
1246 case ROTATERT:
1247 /* Rotations don't affect parity. */
1248 if (!side_effects_p (XEXP (op, 1)))
1249 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1250 GET_MODE (XEXP (op, 0)));
1251 break;
1253 default:
1254 break;
1256 break;
1258 case BSWAP:
1259 /* (bswap (bswap x)) -> x. */
1260 if (GET_CODE (op) == BSWAP)
1261 return XEXP (op, 0);
1262 break;
1264 case FLOAT:
1265 /* (float (sign_extend <X>)) = (float <X>). */
1266 if (GET_CODE (op) == SIGN_EXTEND)
1267 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1269 break;
1271 case SIGN_EXTEND:
1272 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1273 becomes just the MINUS if its mode is MODE. This allows
1274 folding switch statements on machines using casesi (such as
1275 the VAX). */
1276 if (GET_CODE (op) == TRUNCATE
1277 && GET_MODE (XEXP (op, 0)) == mode
1278 && GET_CODE (XEXP (op, 0)) == MINUS
1279 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1280 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1281 return XEXP (op, 0);
1283 /* Extending a widening multiplication should be canonicalized to
1284 a wider widening multiplication. */
1285 if (GET_CODE (op) == MULT)
1287 rtx lhs = XEXP (op, 0);
1288 rtx rhs = XEXP (op, 1);
1289 enum rtx_code lcode = GET_CODE (lhs);
1290 enum rtx_code rcode = GET_CODE (rhs);
1292 /* Widening multiplies usually extend both operands, but sometimes
1293 they use a shift to extract a portion of a register. */
1294 if ((lcode == SIGN_EXTEND
1295 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1296 && (rcode == SIGN_EXTEND
1297 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1299 enum machine_mode lmode = GET_MODE (lhs);
1300 enum machine_mode rmode = GET_MODE (rhs);
1301 int bits;
1303 if (lcode == ASHIFTRT)
1304 /* Number of bits not shifted off the end. */
1305 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1306 else /* lcode == SIGN_EXTEND */
1307 /* Size of inner mode. */
1308 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1310 if (rcode == ASHIFTRT)
1311 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1312 else /* rcode == SIGN_EXTEND */
1313 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1315 /* We can only widen multiplies if the result is mathematically
1316 equivalent. I.e. if overflow was impossible. */
1317 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1318 return simplify_gen_binary
1319 (MULT, mode,
1320 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1321 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1325 /* Check for a sign extension of a subreg of a promoted
1326 variable, where the promotion is sign-extended, and the
1327 target mode is the same as the variable's promotion. */
1328 if (GET_CODE (op) == SUBREG
1329 && SUBREG_PROMOTED_VAR_P (op)
1330 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1331 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1333 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1334 if (temp)
1335 return temp;
1338 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1339 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1340 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1342 gcc_assert (GET_MODE_BITSIZE (mode)
1343 > GET_MODE_BITSIZE (GET_MODE (op)));
1344 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1345 GET_MODE (XEXP (op, 0)));
1348 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1349 is (sign_extend:M (subreg:O <X>)) if there is mode with
1350 GET_MODE_BITSIZE (N) - I bits.
1351 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1352 is similarly (zero_extend:M (subreg:O <X>)). */
1353 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1354 && GET_CODE (XEXP (op, 0)) == ASHIFT
1355 && CONST_INT_P (XEXP (op, 1))
1356 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1357 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1359 enum machine_mode tmode
1360 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1361 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1362 gcc_assert (GET_MODE_BITSIZE (mode)
1363 > GET_MODE_BITSIZE (GET_MODE (op)));
1364 if (tmode != BLKmode)
1366 rtx inner =
1367 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1368 if (inner)
1369 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1370 ? SIGN_EXTEND : ZERO_EXTEND,
1371 mode, inner, tmode);
1375 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1376 /* As we do not know which address space the pointer is referring to,
1377 we can do this only if the target does not support different pointer
1378 or address modes depending on the address space. */
1379 if (target_default_pointer_address_modes_p ()
1380 && ! POINTERS_EXTEND_UNSIGNED
1381 && mode == Pmode && GET_MODE (op) == ptr_mode
1382 && (CONSTANT_P (op)
1383 || (GET_CODE (op) == SUBREG
1384 && REG_P (SUBREG_REG (op))
1385 && REG_POINTER (SUBREG_REG (op))
1386 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1387 return convert_memory_address (Pmode, op);
1388 #endif
1389 break;
1391 case ZERO_EXTEND:
1392 /* Check for a zero extension of a subreg of a promoted
1393 variable, where the promotion is zero-extended, and the
1394 target mode is the same as the variable's promotion. */
1395 if (GET_CODE (op) == SUBREG
1396 && SUBREG_PROMOTED_VAR_P (op)
1397 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1398 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1400 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1401 if (temp)
1402 return temp;
1405 /* Extending a widening multiplication should be canonicalized to
1406 a wider widening multiplication. */
1407 if (GET_CODE (op) == MULT)
1409 rtx lhs = XEXP (op, 0);
1410 rtx rhs = XEXP (op, 1);
1411 enum rtx_code lcode = GET_CODE (lhs);
1412 enum rtx_code rcode = GET_CODE (rhs);
1414 /* Widening multiplies usually extend both operands, but sometimes
1415 they use a shift to extract a portion of a register. */
1416 if ((lcode == ZERO_EXTEND
1417 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1418 && (rcode == ZERO_EXTEND
1419 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1421 enum machine_mode lmode = GET_MODE (lhs);
1422 enum machine_mode rmode = GET_MODE (rhs);
1423 int bits;
1425 if (lcode == LSHIFTRT)
1426 /* Number of bits not shifted off the end. */
1427 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1428 else /* lcode == ZERO_EXTEND */
1429 /* Size of inner mode. */
1430 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1432 if (rcode == LSHIFTRT)
1433 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1434 else /* rcode == ZERO_EXTEND */
1435 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1437 /* We can only widen multiplies if the result is mathematically
1438 equivalent. I.e. if overflow was impossible. */
1439 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1440 return simplify_gen_binary
1441 (MULT, mode,
1442 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1443 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1447 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1448 if (GET_CODE (op) == ZERO_EXTEND)
1449 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1450 GET_MODE (XEXP (op, 0)));
1452 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1453 is (zero_extend:M (subreg:O <X>)) if there is mode with
1454 GET_MODE_BITSIZE (N) - I bits. */
1455 if (GET_CODE (op) == LSHIFTRT
1456 && GET_CODE (XEXP (op, 0)) == ASHIFT
1457 && CONST_INT_P (XEXP (op, 1))
1458 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1459 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1461 enum machine_mode tmode
1462 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1463 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1464 if (tmode != BLKmode)
1466 rtx inner =
1467 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1468 if (inner)
1469 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1473 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1474 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1475 of mode N. E.g.
1476 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1477 (and:SI (reg:SI) (const_int 63)). */
1478 if (GET_CODE (op) == SUBREG
1479 && GET_MODE_PRECISION (GET_MODE (op))
1480 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1481 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1482 <= HOST_BITS_PER_WIDE_INT
1483 && GET_MODE_PRECISION (mode)
1484 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1485 && subreg_lowpart_p (op)
1486 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1487 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1489 if (GET_MODE_PRECISION (mode)
1490 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1491 return SUBREG_REG (op);
1492 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1493 GET_MODE (SUBREG_REG (op)));
1496 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1497 /* As we do not know which address space the pointer is referring to,
1498 we can do this only if the target does not support different pointer
1499 or address modes depending on the address space. */
1500 if (target_default_pointer_address_modes_p ()
1501 && POINTERS_EXTEND_UNSIGNED > 0
1502 && mode == Pmode && GET_MODE (op) == ptr_mode
1503 && (CONSTANT_P (op)
1504 || (GET_CODE (op) == SUBREG
1505 && REG_P (SUBREG_REG (op))
1506 && REG_POINTER (SUBREG_REG (op))
1507 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1508 return convert_memory_address (Pmode, op);
1509 #endif
1510 break;
1512 default:
1513 break;
1516 return 0;
1519 /* Try to compute the value of a unary operation CODE whose output mode is to
1520 be MODE with input operand OP whose mode was originally OP_MODE.
1521 Return zero if the value cannot be computed. */
1523 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1524 rtx op, enum machine_mode op_mode)
1526 unsigned int width = GET_MODE_PRECISION (mode);
1527 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1529 if (code == VEC_DUPLICATE)
1531 gcc_assert (VECTOR_MODE_P (mode));
1532 if (GET_MODE (op) != VOIDmode)
1534 if (!VECTOR_MODE_P (GET_MODE (op)))
1535 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1536 else
1537 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1538 (GET_MODE (op)));
1540 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1541 || GET_CODE (op) == CONST_VECTOR)
1543 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1544 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1545 rtvec v = rtvec_alloc (n_elts);
1546 unsigned int i;
1548 if (GET_CODE (op) != CONST_VECTOR)
1549 for (i = 0; i < n_elts; i++)
1550 RTVEC_ELT (v, i) = op;
1551 else
1553 enum machine_mode inmode = GET_MODE (op);
1554 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1555 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1557 gcc_assert (in_n_elts < n_elts);
1558 gcc_assert ((n_elts % in_n_elts) == 0);
1559 for (i = 0; i < n_elts; i++)
1560 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1562 return gen_rtx_CONST_VECTOR (mode, v);
1566 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1568 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1569 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1570 enum machine_mode opmode = GET_MODE (op);
1571 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1572 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1573 rtvec v = rtvec_alloc (n_elts);
1574 unsigned int i;
1576 gcc_assert (op_n_elts == n_elts);
1577 for (i = 0; i < n_elts; i++)
1579 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1580 CONST_VECTOR_ELT (op, i),
1581 GET_MODE_INNER (opmode));
1582 if (!x)
1583 return 0;
1584 RTVEC_ELT (v, i) = x;
1586 return gen_rtx_CONST_VECTOR (mode, v);
1589 /* The order of these tests is critical so that, for example, we don't
1590 check the wrong mode (input vs. output) for a conversion operation,
1591 such as FIX. At some point, this should be simplified. */
1593 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1595 HOST_WIDE_INT hv, lv;
1596 REAL_VALUE_TYPE d;
1598 if (CONST_INT_P (op))
1599 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1600 else
1601 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1603 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1604 d = real_value_truncate (mode, d);
1605 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1607 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1609 HOST_WIDE_INT hv, lv;
1610 REAL_VALUE_TYPE d;
1612 if (CONST_INT_P (op))
1613 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1614 else
1615 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1617 if (op_mode == VOIDmode
1618 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1619 /* We should never get a negative number. */
1620 gcc_assert (hv >= 0);
1621 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1622 hv = 0, lv &= GET_MODE_MASK (op_mode);
1624 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1625 d = real_value_truncate (mode, d);
1626 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1629 if (CONST_INT_P (op)
1630 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1632 HOST_WIDE_INT arg0 = INTVAL (op);
1633 HOST_WIDE_INT val;
1635 switch (code)
1637 case NOT:
1638 val = ~ arg0;
1639 break;
1641 case NEG:
1642 val = - arg0;
1643 break;
1645 case ABS:
1646 val = (arg0 >= 0 ? arg0 : - arg0);
1647 break;
1649 case FFS:
1650 arg0 &= GET_MODE_MASK (mode);
1651 val = ffs_hwi (arg0);
1652 break;
1654 case CLZ:
1655 arg0 &= GET_MODE_MASK (mode);
1656 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1658 else
1659 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1660 break;
1662 case CLRSB:
1663 arg0 &= GET_MODE_MASK (mode);
1664 if (arg0 == 0)
1665 val = GET_MODE_PRECISION (mode) - 1;
1666 else if (arg0 >= 0)
1667 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1668 else if (arg0 < 0)
1669 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1670 break;
1672 case CTZ:
1673 arg0 &= GET_MODE_MASK (mode);
1674 if (arg0 == 0)
1676 /* Even if the value at zero is undefined, we have to come
1677 up with some replacement. Seems good enough. */
1678 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1679 val = GET_MODE_PRECISION (mode);
1681 else
1682 val = ctz_hwi (arg0);
1683 break;
1685 case POPCOUNT:
1686 arg0 &= GET_MODE_MASK (mode);
1687 val = 0;
1688 while (arg0)
1689 val++, arg0 &= arg0 - 1;
1690 break;
1692 case PARITY:
1693 arg0 &= GET_MODE_MASK (mode);
1694 val = 0;
1695 while (arg0)
1696 val++, arg0 &= arg0 - 1;
1697 val &= 1;
1698 break;
1700 case BSWAP:
1702 unsigned int s;
1704 val = 0;
1705 for (s = 0; s < width; s += 8)
1707 unsigned int d = width - s - 8;
1708 unsigned HOST_WIDE_INT byte;
1709 byte = (arg0 >> s) & 0xff;
1710 val |= byte << d;
1713 break;
1715 case TRUNCATE:
1716 val = arg0;
1717 break;
1719 case ZERO_EXTEND:
1720 /* When zero-extending a CONST_INT, we need to know its
1721 original mode. */
1722 gcc_assert (op_mode != VOIDmode);
1723 if (op_width == HOST_BITS_PER_WIDE_INT)
1725 /* If we were really extending the mode,
1726 we would have to distinguish between zero-extension
1727 and sign-extension. */
1728 gcc_assert (width == op_width);
1729 val = arg0;
1731 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1732 val = arg0 & GET_MODE_MASK (op_mode);
1733 else
1734 return 0;
1735 break;
1737 case SIGN_EXTEND:
1738 if (op_mode == VOIDmode)
1739 op_mode = mode;
1740 op_width = GET_MODE_PRECISION (op_mode);
1741 if (op_width == HOST_BITS_PER_WIDE_INT)
1743 /* If we were really extending the mode,
1744 we would have to distinguish between zero-extension
1745 and sign-extension. */
1746 gcc_assert (width == op_width);
1747 val = arg0;
1749 else if (op_width < HOST_BITS_PER_WIDE_INT)
1751 val = arg0 & GET_MODE_MASK (op_mode);
1752 if (val_signbit_known_set_p (op_mode, val))
1753 val |= ~GET_MODE_MASK (op_mode);
1755 else
1756 return 0;
1757 break;
1759 case SQRT:
1760 case FLOAT_EXTEND:
1761 case FLOAT_TRUNCATE:
1762 case SS_TRUNCATE:
1763 case US_TRUNCATE:
1764 case SS_NEG:
1765 case US_NEG:
1766 case SS_ABS:
1767 return 0;
1769 default:
1770 gcc_unreachable ();
1773 return gen_int_mode (val, mode);
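/* A worked example of the folding above (illustrative only): byte-swapping
   the SImode constant 0x12345678 walks the value a byte at a time and
   produces 0x78563412, and sign-extending the QImode constant 0x80 yields
   (const_int -128).  */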
1776 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1777 for a DImode operation on a CONST_INT. */
1778 else if (width <= HOST_BITS_PER_DOUBLE_INT
1779 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1781 double_int first, value;
1783 if (CONST_DOUBLE_AS_INT_P (op))
1784 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1785 CONST_DOUBLE_LOW (op));
1786 else
1787 first = double_int::from_shwi (INTVAL (op));
1789 switch (code)
1791 case NOT:
1792 value = ~first;
1793 break;
1795 case NEG:
1796 value = -first;
1797 break;
1799 case ABS:
1800 if (first.is_negative ())
1801 value = -first;
1802 else
1803 value = first;
1804 break;
1806 case FFS:
1807 value.high = 0;
1808 if (first.low != 0)
1809 value.low = ffs_hwi (first.low);
1810 else if (first.high != 0)
1811 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1812 else
1813 value.low = 0;
1814 break;
1816 case CLZ:
1817 value.high = 0;
1818 if (first.high != 0)
1819 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1820 - HOST_BITS_PER_WIDE_INT;
1821 else if (first.low != 0)
1822 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1823 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1824 value.low = GET_MODE_PRECISION (mode);
1825 break;
1827 case CTZ:
1828 value.high = 0;
1829 if (first.low != 0)
1830 value.low = ctz_hwi (first.low);
1831 else if (first.high != 0)
1832 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1833 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1834 value.low = GET_MODE_PRECISION (mode);
1835 break;
1837 case POPCOUNT:
1838 value = double_int_zero;
1839 while (first.low)
1841 value.low++;
1842 first.low &= first.low - 1;
1844 while (first.high)
1846 value.low++;
1847 first.high &= first.high - 1;
1849 break;
1851 case PARITY:
1852 value = double_int_zero;
1853 while (first.low)
1855 value.low++;
1856 first.low &= first.low - 1;
1858 while (first.high)
1860 value.low++;
1861 first.high &= first.high - 1;
1863 value.low &= 1;
1864 break;
1866 case BSWAP:
1868 unsigned int s;
1870 value = double_int_zero;
1871 for (s = 0; s < width; s += 8)
1873 unsigned int d = width - s - 8;
1874 unsigned HOST_WIDE_INT byte;
1876 if (s < HOST_BITS_PER_WIDE_INT)
1877 byte = (first.low >> s) & 0xff;
1878 else
1879 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1881 if (d < HOST_BITS_PER_WIDE_INT)
1882 value.low |= byte << d;
1883 else
1884 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1887 break;
1889 case TRUNCATE:
1890 /* This is just a change-of-mode, so do nothing. */
1891 value = first;
1892 break;
1894 case ZERO_EXTEND:
1895 gcc_assert (op_mode != VOIDmode);
1897 if (op_width > HOST_BITS_PER_WIDE_INT)
1898 return 0;
1900 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1901 break;
1903 case SIGN_EXTEND:
1904 if (op_mode == VOIDmode
1905 || op_width > HOST_BITS_PER_WIDE_INT)
1906 return 0;
1907 else
1909 value.low = first.low & GET_MODE_MASK (op_mode);
1910 if (val_signbit_known_set_p (op_mode, value.low))
1911 value.low |= ~GET_MODE_MASK (op_mode);
1913 value.high = HWI_SIGN_EXTEND (value.low);
1915 break;
1917 case SQRT:
1918 return 0;
1920 default:
1921 return 0;
1924 return immed_double_int_const (value, mode);
1927 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1928 && SCALAR_FLOAT_MODE_P (mode)
1929 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1931 REAL_VALUE_TYPE d, t;
1932 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1934 switch (code)
1936 case SQRT:
1937 if (HONOR_SNANS (mode) && real_isnan (&d))
1938 return 0;
1939 real_sqrt (&t, mode, &d);
1940 d = t;
1941 break;
1942 case ABS:
1943 d = real_value_abs (&d);
1944 break;
1945 case NEG:
1946 d = real_value_negate (&d);
1947 break;
1948 case FLOAT_TRUNCATE:
1949 d = real_value_truncate (mode, d);
1950 break;
1951 case FLOAT_EXTEND:
1952 /* All this does is change the mode, unless the mode class
1953 changes, in which case a real conversion is needed. */
1954 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1955 real_convert (&d, mode, &d);
1956 break;
1957 case FIX:
1958 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1959 break;
1960 case NOT:
1962 long tmp[4];
1963 int i;
1965 real_to_target (tmp, &d, GET_MODE (op));
1966 for (i = 0; i < 4; i++)
1967 tmp[i] = ~tmp[i];
1968 real_from_target (&d, tmp, mode);
1969 break;
1971 default:
1972 gcc_unreachable ();
1974 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1977 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1978 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1979 && GET_MODE_CLASS (mode) == MODE_INT
1980 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1982 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1983 operators are intentionally left unspecified (to ease implementation
1984 by target backends), for consistency, this routine implements the
1985 same semantics for constant folding as used by the middle-end. */
1987 /* This was formerly used only for non-IEEE float.
1988 eggert@twinsun.com says it is safe for IEEE also. */
1989 HOST_WIDE_INT xh, xl, th, tl;
1990 REAL_VALUE_TYPE x, t;
1991 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1992 switch (code)
1994 case FIX:
1995 if (REAL_VALUE_ISNAN (x))
1996 return const0_rtx;
1998 /* Test against the signed upper bound. */
1999 if (width > HOST_BITS_PER_WIDE_INT)
2001 th = ((unsigned HOST_WIDE_INT) 1
2002 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2003 tl = -1;
2005 else
2007 th = 0;
2008 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2010 real_from_integer (&t, VOIDmode, tl, th, 0);
2011 if (REAL_VALUES_LESS (t, x))
2013 xh = th;
2014 xl = tl;
2015 break;
2018 /* Test against the signed lower bound. */
2019 if (width > HOST_BITS_PER_WIDE_INT)
2021 th = (unsigned HOST_WIDE_INT) (-1)
2022 << (width - HOST_BITS_PER_WIDE_INT - 1);
2023 tl = 0;
2025 else
2027 th = -1;
2028 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2030 real_from_integer (&t, VOIDmode, tl, th, 0);
2031 if (REAL_VALUES_LESS (x, t))
2033 xh = th;
2034 xl = tl;
2035 break;
2037 REAL_VALUE_TO_INT (&xl, &xh, x);
2038 break;
2040 case UNSIGNED_FIX:
2041 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2042 return const0_rtx;
2044 /* Test against the unsigned upper bound. */
2045 if (width == HOST_BITS_PER_DOUBLE_INT)
2047 th = -1;
2048 tl = -1;
2050 else if (width >= HOST_BITS_PER_WIDE_INT)
2052 th = ((unsigned HOST_WIDE_INT) 1
2053 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2054 tl = -1;
2056 else
2058 th = 0;
2059 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2061 real_from_integer (&t, VOIDmode, tl, th, 1);
2062 if (REAL_VALUES_LESS (t, x))
2064 xh = th;
2065 xl = tl;
2066 break;
2069 REAL_VALUE_TO_INT (&xl, &xh, x);
2070 break;
2072 default:
2073 gcc_unreachable ();
2075 return immed_double_const (xl, xh, mode);
2078 return NULL_RTX;
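/* Usage sketch (not part of the original file): the constant folding
   above is what lets a caller collapse a unary operation on a literal
   operand.  A minimal illustration, assuming SImode:  */
#if 0
  {
    /* Byte-swapping the SImode constant 0x12345678 gives 0x78563412.  */
    rtx swapped = simplify_unary_operation (BSWAP, SImode,
                                            GEN_INT (0x12345678), SImode);
    /* gen_int_mode (0xff, QImode) is (const_int -1); sign-extending it
       to SImode yields (const_int -1) again.  */
    rtx extended = simplify_unary_operation (SIGN_EXTEND, SImode,
                                             gen_int_mode (0xff, QImode),
                                             QImode);
  }
#endif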
2081 /* Subroutine of simplify_binary_operation to simplify a binary operation
2082 CODE that can commute with byte swapping, with result mode MODE and
2083 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2084 Return zero if no simplification or canonicalization is possible. */
2086 static rtx
2087 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2088 rtx op0, rtx op1)
2090 rtx tem;
2092 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2093 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2095 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2096 simplify_gen_unary (BSWAP, mode, op1, mode));
2097 return simplify_gen_unary (BSWAP, mode, tem, mode);
2100 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2101 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2103 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2104 return simplify_gen_unary (BSWAP, mode, tem, mode);
2107 return NULL_RTX;
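/* Illustrative sketch (not part of the original file): with a
   hypothetical SImode pseudo X, the first rule above rewrites
   (and (bswap X) (const_int 0xff)) as
   (bswap (and X (const_int 0xff000000))), i.e. the constant is
   byte-swapped so the masking can happen before the BSWAP.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    rtx swapped = simplify_gen_unary (BSWAP, SImode, x, SImode);
    /* Comes back as (bswap (and X (const_int 0xff000000))).  */
    rtx masked = simplify_gen_binary (AND, SImode, swapped, GEN_INT (0xff));
  }
#endif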
2110 /* Subroutine of simplify_binary_operation to simplify a commutative,
2111 associative binary operation CODE with result mode MODE, operating
2112 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2113 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2114 canonicalization is possible. */
2116 static rtx
2117 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2118 rtx op0, rtx op1)
2120 rtx tem;
2122 /* Linearize the operator to the left. */
2123 if (GET_CODE (op1) == code)
2125 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2126 if (GET_CODE (op0) == code)
2128 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2129 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2132 /* "a op (b op c)" becomes "(b op c) op a". */
2133 if (! swap_commutative_operands_p (op1, op0))
2134 return simplify_gen_binary (code, mode, op1, op0);
2136 tem = op0;
2137 op0 = op1;
2138 op1 = tem;
2141 if (GET_CODE (op0) == code)
2143 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2144 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2146 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2147 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2150 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2151 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2152 if (tem != 0)
2153 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2155 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2156 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2157 if (tem != 0)
2158 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2161 return 0;
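/* Illustrative sketch (not part of the original file): assuming two
   hypothetical SImode pseudos X and Y, the canonicalization above
   moves the constant outward, so (ior (ior X (const_int 4)) Y) is
   rebuilt as (ior (ior X Y) (const_int 4)).  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudos */
    rtx y = gen_rtx_REG (SImode, 101);
    rtx inner = simplify_gen_binary (IOR, SImode, x, GEN_INT (4));
    /* Comes back as (ior (ior X Y) (const_int 4)).  */
    rtx outer = simplify_gen_binary (IOR, SImode, inner, y);
  }
#endif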
2165 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2166 and OP1. Return 0 if no simplification is possible.
2168 Don't use this for relational operations such as EQ or LT.
2169 Use simplify_relational_operation instead. */
2171 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2172 rtx op0, rtx op1)
2174 rtx trueop0, trueop1;
2175 rtx tem;
2177 /* Relational operations don't work here. We must know the mode
2178 of the operands in order to do the comparison correctly.
2179 Assuming a full word can give incorrect results.
2180 Consider comparing 128 with -128 in QImode. */
2181 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2182 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2184 /* Make sure the constant is second. */
2185 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2186 && swap_commutative_operands_p (op0, op1))
2188 tem = op0, op0 = op1, op1 = tem;
2191 trueop0 = avoid_constant_pool_reference (op0);
2192 trueop1 = avoid_constant_pool_reference (op1);
2194 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2195 if (tem)
2196 return tem;
2197 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
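/* Usage sketch (not part of the original file): callers such as CSE
   and combine hand arbitrary operands to this routine; when both are
   constants the operation is folded immediately, otherwise the
   remaining rules below are tried and NULL_RTX is returned if nothing
   applies.  */
#if 0
  {
    /* Expected to be (const_int 5).  */
    rtx sum = simplify_binary_operation (PLUS, SImode,
                                         GEN_INT (2), GEN_INT (3));
    /* No folding applies to a plain register plus a constant, so this
       is expected to be NULL_RTX and the caller keeps the PLUS.  */
    rtx none = simplify_binary_operation (PLUS, SImode,
                                          gen_rtx_REG (SImode, 100),
                                          GEN_INT (3));
  }
#endif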
2200 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2201 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2202 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2203 actual constants. */
2205 static rtx
2206 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2207 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2209 rtx tem, reversed, opleft, opright;
2210 HOST_WIDE_INT val;
2211 unsigned int width = GET_MODE_PRECISION (mode);
2213 /* Even if we can't compute a constant result,
2214 there are some cases worth simplifying. */
2216 switch (code)
2218 case PLUS:
2219 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2220 when x is NaN, infinite, or finite and nonzero. They aren't
2221 when x is -0 and the rounding mode is not towards -infinity,
2222 since (-0) + 0 is then 0. */
2223 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2224 return op0;
2226 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2227 transformations are safe even for IEEE. */
2228 if (GET_CODE (op0) == NEG)
2229 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2230 else if (GET_CODE (op1) == NEG)
2231 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2233 /* (~a) + 1 -> -a */
2234 if (INTEGRAL_MODE_P (mode)
2235 && GET_CODE (op0) == NOT
2236 && trueop1 == const1_rtx)
2237 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2239 /* Handle both-operands-constant cases. We can only add
2240 CONST_INTs to constants since the sum of relocatable symbols
2241 can't be handled by most assemblers. Don't add CONST_INT
2242 to CONST_INT since overflow won't be computed properly if wider
2243 than HOST_BITS_PER_WIDE_INT. */
2245 if ((GET_CODE (op0) == CONST
2246 || GET_CODE (op0) == SYMBOL_REF
2247 || GET_CODE (op0) == LABEL_REF)
2248 && CONST_INT_P (op1))
2249 return plus_constant (mode, op0, INTVAL (op1));
2250 else if ((GET_CODE (op1) == CONST
2251 || GET_CODE (op1) == SYMBOL_REF
2252 || GET_CODE (op1) == LABEL_REF)
2253 && CONST_INT_P (op0))
2254 return plus_constant (mode, op1, INTVAL (op0));
2256 /* See if this is something like X * C - X or vice versa or
2257 if the multiplication is written as a shift. If so, we can
2258 distribute and make a new multiply, shift, or maybe just
2259 have X (if C is 2 in the example above). But don't make
2260 something more expensive than we had before. */
2262 if (SCALAR_INT_MODE_P (mode))
2264 double_int coeff0, coeff1;
2265 rtx lhs = op0, rhs = op1;
2267 coeff0 = double_int_one;
2268 coeff1 = double_int_one;
2270 if (GET_CODE (lhs) == NEG)
2272 coeff0 = double_int_minus_one;
2273 lhs = XEXP (lhs, 0);
2275 else if (GET_CODE (lhs) == MULT
2276 && CONST_INT_P (XEXP (lhs, 1)))
2278 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2279 lhs = XEXP (lhs, 0);
2281 else if (GET_CODE (lhs) == ASHIFT
2282 && CONST_INT_P (XEXP (lhs, 1))
2283 && INTVAL (XEXP (lhs, 1)) >= 0
2284 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2286 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2287 lhs = XEXP (lhs, 0);
2290 if (GET_CODE (rhs) == NEG)
2292 coeff1 = double_int_minus_one;
2293 rhs = XEXP (rhs, 0);
2295 else if (GET_CODE (rhs) == MULT
2296 && CONST_INT_P (XEXP (rhs, 1)))
2298 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2299 rhs = XEXP (rhs, 0);
2301 else if (GET_CODE (rhs) == ASHIFT
2302 && CONST_INT_P (XEXP (rhs, 1))
2303 && INTVAL (XEXP (rhs, 1)) >= 0
2304 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2306 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2307 rhs = XEXP (rhs, 0);
2310 if (rtx_equal_p (lhs, rhs))
2312 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2313 rtx coeff;
2314 double_int val;
2315 bool speed = optimize_function_for_speed_p (cfun);
2317 val = coeff0 + coeff1;
2318 coeff = immed_double_int_const (val, mode);
2320 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2321 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2322 ? tem : 0;
2326 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2327 if (CONST_SCALAR_INT_P (op1)
2328 && GET_CODE (op0) == XOR
2329 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2330 && mode_signbit_p (mode, op1))
2331 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2332 simplify_gen_binary (XOR, mode, op1,
2333 XEXP (op0, 1)));
2335 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2336 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2337 && GET_CODE (op0) == MULT
2338 && GET_CODE (XEXP (op0, 0)) == NEG)
2340 rtx in1, in2;
2342 in1 = XEXP (XEXP (op0, 0), 0);
2343 in2 = XEXP (op0, 1);
2344 return simplify_gen_binary (MINUS, mode, op1,
2345 simplify_gen_binary (MULT, mode,
2346 in1, in2));
2349 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2350 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2351 is 1. */
2352 if (COMPARISON_P (op0)
2353 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2354 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2355 && (reversed = reversed_comparison (op0, mode)))
2356 return
2357 simplify_gen_unary (NEG, mode, reversed, mode);
2359 /* If one of the operands is a PLUS or a MINUS, see if we can
2360 simplify this by the associative law.
2361 Don't use the associative law for floating point.
2362 The inaccuracy makes it nonassociative,
2363 and subtle programs can break if operations are associated. */
2365 if (INTEGRAL_MODE_P (mode)
2366 && (plus_minus_operand_p (op0)
2367 || plus_minus_operand_p (op1))
2368 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2369 return tem;
2371 /* Reassociate floating point addition only when the user
2372 specifies associative math operations. */
2373 if (FLOAT_MODE_P (mode)
2374 && flag_associative_math)
2376 tem = simplify_associative_operation (code, mode, op0, op1);
2377 if (tem)
2378 return tem;
2380 break;
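/* Illustrative sketch (not part of the original file): two of the PLUS
   rewrites above, assuming a hypothetical SImode pseudo X.
   (plus (not X) (const_int 1)) folds to (neg X); (plus (mult X 4) X)
   is re-expressed as (mult X 5) when the target's cost model says the
   multiply is no more expensive than the original expression.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    /* (neg X).  */
    rtx neg = simplify_gen_binary (PLUS, SImode,
                                   simplify_gen_unary (NOT, SImode, x, SImode),
                                   const1_rtx);
    /* (mult X 5), cost permitting.  */
    rtx five = simplify_gen_binary (PLUS, SImode,
                                    gen_rtx_MULT (SImode, x, GEN_INT (4)), x);
  }
#endif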
2382 case COMPARE:
2383 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2384 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2385 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2386 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2388 rtx xop00 = XEXP (op0, 0);
2389 rtx xop10 = XEXP (op1, 0);
2391 #ifdef HAVE_cc0
2392 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2393 #else
2394 if (REG_P (xop00) && REG_P (xop10)
2395 && GET_MODE (xop00) == GET_MODE (xop10)
2396 && REGNO (xop00) == REGNO (xop10)
2397 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2398 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2399 #endif
2400 return xop00;
2402 break;
2404 case MINUS:
2405 /* We can't assume x-x is 0 even with non-IEEE floating point,
2406 but since it is zero except in very strange circumstances, we
2407 will treat it as zero with -ffinite-math-only. */
2408 if (rtx_equal_p (trueop0, trueop1)
2409 && ! side_effects_p (op0)
2410 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2411 return CONST0_RTX (mode);
2413 /* Change subtraction from zero into negation. (0 - x) is the
2414 same as -x when x is NaN, infinite, or finite and nonzero.
2415 But if the mode has signed zeros, and does not round towards
2416 -infinity, then 0 - 0 is 0, not -0. */
2417 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2418 return simplify_gen_unary (NEG, mode, op1, mode);
2420 /* (-1 - a) is ~a. */
2421 if (trueop0 == constm1_rtx)
2422 return simplify_gen_unary (NOT, mode, op1, mode);
2424 /* Subtracting 0 has no effect unless the mode has signed zeros
2425 and supports rounding towards -infinity. In such a case,
2426 0 - 0 is -0. */
2427 if (!(HONOR_SIGNED_ZEROS (mode)
2428 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2429 && trueop1 == CONST0_RTX (mode))
2430 return op0;
2432 /* See if this is something like X * C - X or vice versa or
2433 if the multiplication is written as a shift. If so, we can
2434 distribute and make a new multiply, shift, or maybe just
2435 have X (if C is 2 in the example above). But don't make
2436 something more expensive than we had before. */
2438 if (SCALAR_INT_MODE_P (mode))
2440 double_int coeff0, negcoeff1;
2441 rtx lhs = op0, rhs = op1;
2443 coeff0 = double_int_one;
2444 negcoeff1 = double_int_minus_one;
2446 if (GET_CODE (lhs) == NEG)
2448 coeff0 = double_int_minus_one;
2449 lhs = XEXP (lhs, 0);
2451 else if (GET_CODE (lhs) == MULT
2452 && CONST_INT_P (XEXP (lhs, 1)))
2454 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2455 lhs = XEXP (lhs, 0);
2457 else if (GET_CODE (lhs) == ASHIFT
2458 && CONST_INT_P (XEXP (lhs, 1))
2459 && INTVAL (XEXP (lhs, 1)) >= 0
2460 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2462 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2463 lhs = XEXP (lhs, 0);
2466 if (GET_CODE (rhs) == NEG)
2468 negcoeff1 = double_int_one;
2469 rhs = XEXP (rhs, 0);
2471 else if (GET_CODE (rhs) == MULT
2472 && CONST_INT_P (XEXP (rhs, 1)))
2474 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2475 rhs = XEXP (rhs, 0);
2477 else if (GET_CODE (rhs) == ASHIFT
2478 && CONST_INT_P (XEXP (rhs, 1))
2479 && INTVAL (XEXP (rhs, 1)) >= 0
2480 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2482 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2483 negcoeff1 = -negcoeff1;
2484 rhs = XEXP (rhs, 0);
2487 if (rtx_equal_p (lhs, rhs))
2489 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2490 rtx coeff;
2491 double_int val;
2492 bool speed = optimize_function_for_speed_p (cfun);
2494 val = coeff0 + negcoeff1;
2495 coeff = immed_double_int_const (val, mode);
2497 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2498 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2499 ? tem : 0;
2503 /* (a - (-b)) -> (a + b). True even for IEEE. */
2504 if (GET_CODE (op1) == NEG)
2505 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2507 /* (-x - c) may be simplified as (-c - x). */
2508 if (GET_CODE (op0) == NEG
2509 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2511 tem = simplify_unary_operation (NEG, mode, op1, mode);
2512 if (tem)
2513 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2516 /* Don't let a relocatable value get a negative coeff. */
2517 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2518 return simplify_gen_binary (PLUS, mode,
2519 op0,
2520 neg_const_int (mode, op1));
2522 /* (x - (x & y)) -> (x & ~y) */
2523 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2525 if (rtx_equal_p (op0, XEXP (op1, 0)))
2527 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2528 GET_MODE (XEXP (op1, 1)));
2529 return simplify_gen_binary (AND, mode, op0, tem);
2531 if (rtx_equal_p (op0, XEXP (op1, 1)))
2533 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2534 GET_MODE (XEXP (op1, 0)));
2535 return simplify_gen_binary (AND, mode, op0, tem);
2539 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2540 by reversing the comparison code if valid. */
2541 if (STORE_FLAG_VALUE == 1
2542 && trueop0 == const1_rtx
2543 && COMPARISON_P (op1)
2544 && (reversed = reversed_comparison (op1, mode)))
2545 return reversed;
2547 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2548 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2549 && GET_CODE (op1) == MULT
2550 && GET_CODE (XEXP (op1, 0)) == NEG)
2552 rtx in1, in2;
2554 in1 = XEXP (XEXP (op1, 0), 0);
2555 in2 = XEXP (op1, 1);
2556 return simplify_gen_binary (PLUS, mode,
2557 simplify_gen_binary (MULT, mode,
2558 in1, in2),
2559 op0);
2562 /* Canonicalize (minus (neg A) (mult B C)) to
2563 (minus (mult (neg B) C) A). */
2564 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2565 && GET_CODE (op1) == MULT
2566 && GET_CODE (op0) == NEG)
2568 rtx in1, in2;
2570 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2571 in2 = XEXP (op1, 1);
2572 return simplify_gen_binary (MINUS, mode,
2573 simplify_gen_binary (MULT, mode,
2574 in1, in2),
2575 XEXP (op0, 0));
2578 /* If one of the operands is a PLUS or a MINUS, see if we can
2579 simplify this by the associative law. This will, for example,
2580 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2581 Don't use the associative law for floating point.
2582 The inaccuracy makes it nonassociative,
2583 and subtle programs can break if operations are associated. */
2585 if (INTEGRAL_MODE_P (mode)
2586 && (plus_minus_operand_p (op0)
2587 || plus_minus_operand_p (op1))
2588 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2589 return tem;
2590 break;
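/* Illustrative sketch (not part of the original file): assuming
   hypothetical SImode pseudos X and Y, (minus (const_int -1) X)
   becomes (not X), and (minus X (and X Y)) becomes (and X (not Y));
   both forms usually map onto a single machine instruction.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudos */
    rtx y = gen_rtx_REG (SImode, 101);
    /* (not X).  */
    rtx flipped = simplify_gen_binary (MINUS, SImode, constm1_rtx, x);
    /* (and X (not Y)).  */
    rtx cleared = simplify_gen_binary (MINUS, SImode, x,
                                       gen_rtx_AND (SImode, x, y));
  }
#endif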
2592 case MULT:
2593 if (trueop1 == constm1_rtx)
2594 return simplify_gen_unary (NEG, mode, op0, mode);
2596 if (GET_CODE (op0) == NEG)
2598 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2599 /* If op1 is a MULT as well and simplify_unary_operation
2600 just moved the NEG to the second operand, simplify_gen_binary
2601 below could, through simplify_associative_operation, move
2602 the NEG around again and recurse endlessly. */
2603 if (temp
2604 && GET_CODE (op1) == MULT
2605 && GET_CODE (temp) == MULT
2606 && XEXP (op1, 0) == XEXP (temp, 0)
2607 && GET_CODE (XEXP (temp, 1)) == NEG
2608 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2609 temp = NULL_RTX;
2610 if (temp)
2611 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2613 if (GET_CODE (op1) == NEG)
2615 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2616 /* If op0 is a MULT as well and simplify_unary_operation
2617 just moved the NEG to the second operand, simplify_gen_binary
2618 below could, through simplify_associative_operation, move
2619 the NEG around again and recurse endlessly. */
2620 if (temp
2621 && GET_CODE (op0) == MULT
2622 && GET_CODE (temp) == MULT
2623 && XEXP (op0, 0) == XEXP (temp, 0)
2624 && GET_CODE (XEXP (temp, 1)) == NEG
2625 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2626 temp = NULL_RTX;
2627 if (temp)
2628 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2631 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2632 x is NaN, since x * 0 is then also NaN. Nor is it valid
2633 when the mode has signed zeros, since multiplying a negative
2634 number by 0 will give -0, not 0. */
2635 if (!HONOR_NANS (mode)
2636 && !HONOR_SIGNED_ZEROS (mode)
2637 && trueop1 == CONST0_RTX (mode)
2638 && ! side_effects_p (op0))
2639 return op1;
2641 /* In IEEE floating point, x*1 is not equivalent to x for
2642 signalling NaNs. */
2643 if (!HONOR_SNANS (mode)
2644 && trueop1 == CONST1_RTX (mode))
2645 return op0;
2647 /* Convert multiply by constant power of two into shift unless
2648 we are still generating RTL. This test is a kludge. */
2649 if (CONST_INT_P (trueop1)
2650 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2651 /* If the mode is larger than the host word size, and the
2652 uppermost bit is set, then this isn't a power of two due
2653 to implicit sign extension. */
2654 && (width <= HOST_BITS_PER_WIDE_INT
2655 || val != HOST_BITS_PER_WIDE_INT - 1))
2656 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2658 /* Likewise for multipliers wider than a word. */
2659 if (CONST_DOUBLE_AS_INT_P (trueop1)
2660 && GET_MODE (op0) == mode
2661 && CONST_DOUBLE_LOW (trueop1) == 0
2662 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2663 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2664 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2665 return simplify_gen_binary (ASHIFT, mode, op0,
2666 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2668 /* x*2 is x+x and x*(-1) is -x */
2669 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2670 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2671 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2672 && GET_MODE (op0) == mode)
2674 REAL_VALUE_TYPE d;
2675 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2677 if (REAL_VALUES_EQUAL (d, dconst2))
2678 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2680 if (!HONOR_SNANS (mode)
2681 && REAL_VALUES_EQUAL (d, dconstm1))
2682 return simplify_gen_unary (NEG, mode, op0, mode);
2685 /* Optimize -x * -x as x * x. */
2686 if (FLOAT_MODE_P (mode)
2687 && GET_CODE (op0) == NEG
2688 && GET_CODE (op1) == NEG
2689 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2690 && !side_effects_p (XEXP (op0, 0)))
2691 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2693 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2694 if (SCALAR_FLOAT_MODE_P (mode)
2695 && GET_CODE (op0) == ABS
2696 && GET_CODE (op1) == ABS
2697 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2698 && !side_effects_p (XEXP (op0, 0)))
2699 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2701 /* Reassociate multiplication, but for floating point MULTs
2702 only when the user specifies unsafe math optimizations. */
2703 if (! FLOAT_MODE_P (mode)
2704 || flag_unsafe_math_optimizations)
2706 tem = simplify_associative_operation (code, mode, op0, op1);
2707 if (tem)
2708 return tem;
2710 break;
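/* Illustrative sketch (not part of the original file): assuming a
   hypothetical SImode pseudo X, (mult X (const_int 8)) is rewritten as
   (ashift X (const_int 3)) and (mult X (const_int -1)) as (neg X).  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    rtx shifted = simplify_gen_binary (MULT, SImode, x, GEN_INT (8));
    rtx negated = simplify_gen_binary (MULT, SImode, x, constm1_rtx);
  }
#endif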
2712 case IOR:
2713 if (trueop1 == CONST0_RTX (mode))
2714 return op0;
2715 if (INTEGRAL_MODE_P (mode)
2716 && trueop1 == CONSTM1_RTX (mode)
2717 && !side_effects_p (op0))
2718 return op1;
2719 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2720 return op0;
2721 /* A | (~A) -> -1 */
2722 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2723 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2724 && ! side_effects_p (op0)
2725 && SCALAR_INT_MODE_P (mode))
2726 return constm1_rtx;
2728 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2729 if (CONST_INT_P (op1)
2730 && HWI_COMPUTABLE_MODE_P (mode)
2731 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2732 && !side_effects_p (op0))
2733 return op1;
2735 /* Canonicalize (X & C1) | C2. */
2736 if (GET_CODE (op0) == AND
2737 && CONST_INT_P (trueop1)
2738 && CONST_INT_P (XEXP (op0, 1)))
2740 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2741 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2742 HOST_WIDE_INT c2 = INTVAL (trueop1);
2744 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2745 if ((c1 & c2) == c1
2746 && !side_effects_p (XEXP (op0, 0)))
2747 return trueop1;
2749 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2750 if (((c1|c2) & mask) == mask)
2751 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2753 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2754 if (((c1 & ~c2) & mask) != (c1 & mask))
2756 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2757 gen_int_mode (c1 & ~c2, mode));
2758 return simplify_gen_binary (IOR, mode, tem, op1);
2762 /* Convert (A & B) | A to A. */
2763 if (GET_CODE (op0) == AND
2764 && (rtx_equal_p (XEXP (op0, 0), op1)
2765 || rtx_equal_p (XEXP (op0, 1), op1))
2766 && ! side_effects_p (XEXP (op0, 0))
2767 && ! side_effects_p (XEXP (op0, 1)))
2768 return op1;
2770 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2771 mode size to (rotate A CX). */
2773 if (GET_CODE (op1) == ASHIFT
2774 || GET_CODE (op1) == SUBREG)
2776 opleft = op1;
2777 opright = op0;
2779 else
2781 opright = op1;
2782 opleft = op0;
2785 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2786 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2787 && CONST_INT_P (XEXP (opleft, 1))
2788 && CONST_INT_P (XEXP (opright, 1))
2789 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2790 == GET_MODE_PRECISION (mode)))
2791 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2793 /* Same, but for ashift that has been "simplified" to a wider mode
2794 by simplify_shift_const. */
2796 if (GET_CODE (opleft) == SUBREG
2797 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2798 && GET_CODE (opright) == LSHIFTRT
2799 && GET_CODE (XEXP (opright, 0)) == SUBREG
2800 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2801 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2802 && (GET_MODE_SIZE (GET_MODE (opleft))
2803 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2804 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2805 SUBREG_REG (XEXP (opright, 0)))
2806 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2807 && CONST_INT_P (XEXP (opright, 1))
2808 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2809 == GET_MODE_PRECISION (mode)))
2810 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2811 XEXP (SUBREG_REG (opleft), 1));
2813 /* If we have (ior (and X C1) C2), simplify this by making
2814 C1 as small as possible if C1 actually changes. */
2815 if (CONST_INT_P (op1)
2816 && (HWI_COMPUTABLE_MODE_P (mode)
2817 || INTVAL (op1) > 0)
2818 && GET_CODE (op0) == AND
2819 && CONST_INT_P (XEXP (op0, 1))
2820 && CONST_INT_P (op1)
2821 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2822 return simplify_gen_binary (IOR, mode,
2823 simplify_gen_binary
2824 (AND, mode, XEXP (op0, 0),
2825 GEN_INT (UINTVAL (XEXP (op0, 1))
2826 & ~UINTVAL (op1))),
2827 op1);
2829 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2830 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2831 the PLUS does not affect any of the bits in OP1: then we can do
2832 the IOR as a PLUS and we can associate. This is valid if OP1
2833 can be safely shifted left C bits. */
2834 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2835 && GET_CODE (XEXP (op0, 0)) == PLUS
2836 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2837 && CONST_INT_P (XEXP (op0, 1))
2838 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2840 int count = INTVAL (XEXP (op0, 1));
2841 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2843 if (mask >> count == INTVAL (trueop1)
2844 && trunc_int_for_mode (mask, mode) == mask
2845 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2846 return simplify_gen_binary (ASHIFTRT, mode,
2847 plus_constant (mode, XEXP (op0, 0),
2848 mask),
2849 XEXP (op0, 1));
2852 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2853 if (tem)
2854 return tem;
2856 tem = simplify_associative_operation (code, mode, op0, op1);
2857 if (tem)
2858 return tem;
2859 break;
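/* Illustrative sketch (not part of the original file): assuming a
   hypothetical SImode pseudo X, the shift-pair detection above rewrites
   (ior (ashift X (const_int 8)) (lshiftrt X (const_int 24))) as
   (rotate X (const_int 8)), because the two shift counts add up to the
   mode precision.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    /* (rotate X 8).  */
    rtx rot = simplify_gen_binary (IOR, SImode,
                                   gen_rtx_ASHIFT (SImode, x, GEN_INT (8)),
                                   gen_rtx_LSHIFTRT (SImode, x, GEN_INT (24)));
  }
#endif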
2861 case XOR:
2862 if (trueop1 == CONST0_RTX (mode))
2863 return op0;
2864 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2865 return simplify_gen_unary (NOT, mode, op0, mode);
2866 if (rtx_equal_p (trueop0, trueop1)
2867 && ! side_effects_p (op0)
2868 && GET_MODE_CLASS (mode) != MODE_CC)
2869 return CONST0_RTX (mode);
2871 /* Canonicalize XOR of the most significant bit to PLUS. */
2872 if (CONST_SCALAR_INT_P (op1)
2873 && mode_signbit_p (mode, op1))
2874 return simplify_gen_binary (PLUS, mode, op0, op1);
2875 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2876 if (CONST_SCALAR_INT_P (op1)
2877 && GET_CODE (op0) == PLUS
2878 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2879 && mode_signbit_p (mode, XEXP (op0, 1)))
2880 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2881 simplify_gen_binary (XOR, mode, op1,
2882 XEXP (op0, 1)));
2884 /* If we are XORing two things that have no bits in common,
2885 convert them into an IOR. This helps to detect rotates encoded
2886 as an XOR of shifts and may enable other simplifications. */
2888 if (HWI_COMPUTABLE_MODE_P (mode)
2889 && (nonzero_bits (op0, mode)
2890 & nonzero_bits (op1, mode)) == 0)
2891 return (simplify_gen_binary (IOR, mode, op0, op1));
2893 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2894 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2895 (NOT y). */
2897 int num_negated = 0;
2899 if (GET_CODE (op0) == NOT)
2900 num_negated++, op0 = XEXP (op0, 0);
2901 if (GET_CODE (op1) == NOT)
2902 num_negated++, op1 = XEXP (op1, 0);
2904 if (num_negated == 2)
2905 return simplify_gen_binary (XOR, mode, op0, op1);
2906 else if (num_negated == 1)
2907 return simplify_gen_unary (NOT, mode,
2908 simplify_gen_binary (XOR, mode, op0, op1),
2909 mode);
2912 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2913 correspond to a machine insn or result in further simplifications
2914 if B is a constant. */
2916 if (GET_CODE (op0) == AND
2917 && rtx_equal_p (XEXP (op0, 1), op1)
2918 && ! side_effects_p (op1))
2919 return simplify_gen_binary (AND, mode,
2920 simplify_gen_unary (NOT, mode,
2921 XEXP (op0, 0), mode),
2922 op1);
2924 else if (GET_CODE (op0) == AND
2925 && rtx_equal_p (XEXP (op0, 0), op1)
2926 && ! side_effects_p (op1))
2927 return simplify_gen_binary (AND, mode,
2928 simplify_gen_unary (NOT, mode,
2929 XEXP (op0, 1), mode),
2930 op1);
2932 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2933 we can transform like this:
2934 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2935 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2936 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2937 Attempt a few simplifications when B and C are both constants. */
2938 if (GET_CODE (op0) == AND
2939 && CONST_INT_P (op1)
2940 && CONST_INT_P (XEXP (op0, 1)))
2942 rtx a = XEXP (op0, 0);
2943 rtx b = XEXP (op0, 1);
2944 rtx c = op1;
2945 HOST_WIDE_INT bval = INTVAL (b);
2946 HOST_WIDE_INT cval = INTVAL (c);
2948 rtx na_c
2949 = simplify_binary_operation (AND, mode,
2950 simplify_gen_unary (NOT, mode, a, mode),
2952 if ((~cval & bval) == 0)
2954 /* Try to simplify ~A&C | ~B&C. */
2955 if (na_c != NULL_RTX)
2956 return simplify_gen_binary (IOR, mode, na_c,
2957 GEN_INT (~bval & cval));
2959 else
2961 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2962 if (na_c == const0_rtx)
2964 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2965 GEN_INT (~cval & bval));
2966 return simplify_gen_binary (IOR, mode, a_nc_b,
2967 GEN_INT (~bval & cval));
2972 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2973 comparison if STORE_FLAG_VALUE is 1. */
2974 if (STORE_FLAG_VALUE == 1
2975 && trueop1 == const1_rtx
2976 && COMPARISON_P (op0)
2977 && (reversed = reversed_comparison (op0, mode)))
2978 return reversed;
2980 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2981 is (lt foo (const_int 0)), so we can perform the above
2982 simplification if STORE_FLAG_VALUE is 1. */
2984 if (STORE_FLAG_VALUE == 1
2985 && trueop1 == const1_rtx
2986 && GET_CODE (op0) == LSHIFTRT
2987 && CONST_INT_P (XEXP (op0, 1))
2988 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2989 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2991 /* (xor (comparison foo bar) (const_int sign-bit))
2992 when STORE_FLAG_VALUE is the sign bit. */
2993 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2994 && trueop1 == const_true_rtx
2995 && COMPARISON_P (op0)
2996 && (reversed = reversed_comparison (op0, mode)))
2997 return reversed;
2999 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3000 if (tem)
3001 return tem;
3003 tem = simplify_associative_operation (code, mode, op0, op1);
3004 if (tem)
3005 return tem;
3006 break;
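/* Illustrative sketch (not part of the original file): assuming
   hypothetical SImode pseudos X and Y, an XOR with the sign bit is
   canonicalized to a PLUS, and an XOR whose operands have no nonzero
   bits in common is turned into the equivalent IOR.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudos */
    rtx y = gen_rtx_REG (SImode, 101);
    rtx signbit = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
    /* (plus X (const_int -2147483648)).  */
    rtx as_plus = simplify_gen_binary (XOR, SImode, x, signbit);
    /* Low byte and high half cannot overlap, so this becomes an IOR.  */
    rtx as_ior = simplify_gen_binary (XOR, SImode,
                                      gen_rtx_AND (SImode, x, GEN_INT (0xff)),
                                      gen_rtx_ASHIFT (SImode, y, GEN_INT (16)));
  }
#endif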
3008 case AND:
3009 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3010 return trueop1;
3011 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3012 return op0;
3013 if (HWI_COMPUTABLE_MODE_P (mode))
3015 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3016 HOST_WIDE_INT nzop1;
3017 if (CONST_INT_P (trueop1))
3019 HOST_WIDE_INT val1 = INTVAL (trueop1);
3020 /* If we are turning off bits already known off in OP0, we need
3021 not do an AND. */
3022 if ((nzop0 & ~val1) == 0)
3023 return op0;
3025 nzop1 = nonzero_bits (trueop1, mode);
3026 /* If we are clearing all the nonzero bits, the result is zero. */
3027 if ((nzop1 & nzop0) == 0
3028 && !side_effects_p (op0) && !side_effects_p (op1))
3029 return CONST0_RTX (mode);
3031 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3032 && GET_MODE_CLASS (mode) != MODE_CC)
3033 return op0;
3034 /* A & (~A) -> 0 */
3035 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3036 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3037 && ! side_effects_p (op0)
3038 && GET_MODE_CLASS (mode) != MODE_CC)
3039 return CONST0_RTX (mode);
3041 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3042 there are no nonzero bits of C outside of X's mode. */
3043 if ((GET_CODE (op0) == SIGN_EXTEND
3044 || GET_CODE (op0) == ZERO_EXTEND)
3045 && CONST_INT_P (trueop1)
3046 && HWI_COMPUTABLE_MODE_P (mode)
3047 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3048 & UINTVAL (trueop1)) == 0)
3050 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3051 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3052 gen_int_mode (INTVAL (trueop1),
3053 imode));
3054 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3057 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3058 we might be able to further simplify the AND with X and potentially
3059 remove the truncation altogether. */
3060 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3062 rtx x = XEXP (op0, 0);
3063 enum machine_mode xmode = GET_MODE (x);
3064 tem = simplify_gen_binary (AND, xmode, x,
3065 gen_int_mode (INTVAL (trueop1), xmode));
3066 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3069 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3070 if (GET_CODE (op0) == IOR
3071 && CONST_INT_P (trueop1)
3072 && CONST_INT_P (XEXP (op0, 1)))
3074 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3075 return simplify_gen_binary (IOR, mode,
3076 simplify_gen_binary (AND, mode,
3077 XEXP (op0, 0), op1),
3078 gen_int_mode (tmp, mode));
3081 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3082 insn (and may simplify more). */
3083 if (GET_CODE (op0) == XOR
3084 && rtx_equal_p (XEXP (op0, 0), op1)
3085 && ! side_effects_p (op1))
3086 return simplify_gen_binary (AND, mode,
3087 simplify_gen_unary (NOT, mode,
3088 XEXP (op0, 1), mode),
3089 op1);
3091 if (GET_CODE (op0) == XOR
3092 && rtx_equal_p (XEXP (op0, 1), op1)
3093 && ! side_effects_p (op1))
3094 return simplify_gen_binary (AND, mode,
3095 simplify_gen_unary (NOT, mode,
3096 XEXP (op0, 0), mode),
3097 op1);
3099 /* Similarly for (~(A ^ B)) & A. */
3100 if (GET_CODE (op0) == NOT
3101 && GET_CODE (XEXP (op0, 0)) == XOR
3102 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3103 && ! side_effects_p (op1))
3104 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3106 if (GET_CODE (op0) == NOT
3107 && GET_CODE (XEXP (op0, 0)) == XOR
3108 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3109 && ! side_effects_p (op1))
3110 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3112 /* Convert (A | B) & A to A. */
3113 if (GET_CODE (op0) == IOR
3114 && (rtx_equal_p (XEXP (op0, 0), op1)
3115 || rtx_equal_p (XEXP (op0, 1), op1))
3116 && ! side_effects_p (XEXP (op0, 0))
3117 && ! side_effects_p (XEXP (op0, 1)))
3118 return op1;
3120 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3121 ((A & N) + B) & M -> (A + B) & M
3122 Similarly if (N & M) == 0,
3123 ((A | N) + B) & M -> (A + B) & M
3124 and for - instead of + and/or ^ instead of |.
3125 Also, if (N & M) == 0, then
3126 (A +- N) & M -> A & M. */
3127 if (CONST_INT_P (trueop1)
3128 && HWI_COMPUTABLE_MODE_P (mode)
3129 && ~UINTVAL (trueop1)
3130 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3131 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3133 rtx pmop[2];
3134 int which;
3136 pmop[0] = XEXP (op0, 0);
3137 pmop[1] = XEXP (op0, 1);
3139 if (CONST_INT_P (pmop[1])
3140 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3141 return simplify_gen_binary (AND, mode, pmop[0], op1);
3143 for (which = 0; which < 2; which++)
3145 tem = pmop[which];
3146 switch (GET_CODE (tem))
3148 case AND:
3149 if (CONST_INT_P (XEXP (tem, 1))
3150 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3151 == UINTVAL (trueop1))
3152 pmop[which] = XEXP (tem, 0);
3153 break;
3154 case IOR:
3155 case XOR:
3156 if (CONST_INT_P (XEXP (tem, 1))
3157 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3158 pmop[which] = XEXP (tem, 0);
3159 break;
3160 default:
3161 break;
3165 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3167 tem = simplify_gen_binary (GET_CODE (op0), mode,
3168 pmop[0], pmop[1]);
3169 return simplify_gen_binary (code, mode, tem, op1);
3173 /* (and X (ior (not X) Y)) -> (and X Y) */
3174 if (GET_CODE (op1) == IOR
3175 && GET_CODE (XEXP (op1, 0)) == NOT
3176 && op0 == XEXP (XEXP (op1, 0), 0))
3177 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3179 /* (and (ior (not X) Y) X) -> (and X Y) */
3180 if (GET_CODE (op0) == IOR
3181 && GET_CODE (XEXP (op0, 0)) == NOT
3182 && op1 == XEXP (XEXP (op0, 0), 0))
3183 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3185 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3186 if (tem)
3187 return tem;
3189 tem = simplify_associative_operation (code, mode, op0, op1);
3190 if (tem)
3191 return tem;
3192 break;
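/* Illustrative sketch (not part of the original file): assuming a
   hypothetical SImode pseudo X, (and (ior X (const_int 12))
   (const_int 15)) is canonicalized as (ior (and X (const_int 15))
   (const_int 12)), and an AND whose mask covers every possibly-nonzero
   bit of its operand, e.g. (and (and X (const_int 0xff))
   (const_int 0xffff)), collapses to that operand.  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    /* (ior (and X 15) 12).  */
    rtx canon = simplify_gen_binary (AND, SImode,
                                     gen_rtx_IOR (SImode, x, GEN_INT (12)),
                                     GEN_INT (15));
    /* Just (and X 0xff): the wider mask is redundant.  */
    rtx same = simplify_gen_binary (AND, SImode,
                                    gen_rtx_AND (SImode, x, GEN_INT (0xff)),
                                    GEN_INT (0xffff));
  }
#endif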
3194 case UDIV:
3195 /* 0/x is 0 (or x&0 if x has side-effects). */
3196 if (trueop0 == CONST0_RTX (mode))
3198 if (side_effects_p (op1))
3199 return simplify_gen_binary (AND, mode, op1, trueop0);
3200 return trueop0;
3202 /* x/1 is x. */
3203 if (trueop1 == CONST1_RTX (mode))
3205 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3206 if (tem)
3207 return tem;
3209 /* Convert divide by power of two into shift. */
3210 if (CONST_INT_P (trueop1)
3211 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3212 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3213 break;
3215 case DIV:
3216 /* Handle floating point and integers separately. */
3217 if (SCALAR_FLOAT_MODE_P (mode))
3219 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3220 safe for modes with NaNs, since 0.0 / 0.0 will then be
3221 NaN rather than 0.0. Nor is it safe for modes with signed
3222 zeros, since dividing 0 by a negative number gives -0.0 */
3223 if (trueop0 == CONST0_RTX (mode)
3224 && !HONOR_NANS (mode)
3225 && !HONOR_SIGNED_ZEROS (mode)
3226 && ! side_effects_p (op1))
3227 return op0;
3228 /* x/1.0 is x. */
3229 if (trueop1 == CONST1_RTX (mode)
3230 && !HONOR_SNANS (mode))
3231 return op0;
3233 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3234 && trueop1 != CONST0_RTX (mode))
3236 REAL_VALUE_TYPE d;
3237 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3239 /* x/-1.0 is -x. */
3240 if (REAL_VALUES_EQUAL (d, dconstm1)
3241 && !HONOR_SNANS (mode))
3242 return simplify_gen_unary (NEG, mode, op0, mode);
3244 /* Change FP division by a constant into multiplication.
3245 Only do this with -freciprocal-math. */
3246 if (flag_reciprocal_math
3247 && !REAL_VALUES_EQUAL (d, dconst0))
3249 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3250 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3251 return simplify_gen_binary (MULT, mode, op0, tem);
3255 else if (SCALAR_INT_MODE_P (mode))
3257 /* 0/x is 0 (or x&0 if x has side-effects). */
3258 if (trueop0 == CONST0_RTX (mode)
3259 && !cfun->can_throw_non_call_exceptions)
3261 if (side_effects_p (op1))
3262 return simplify_gen_binary (AND, mode, op1, trueop0);
3263 return trueop0;
3265 /* x/1 is x. */
3266 if (trueop1 == CONST1_RTX (mode))
3268 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3269 if (tem)
3270 return tem;
3272 /* x/-1 is -x. */
3273 if (trueop1 == constm1_rtx)
3275 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3276 if (x)
3277 return simplify_gen_unary (NEG, mode, x, mode);
3280 break;
3282 case UMOD:
3283 /* 0%x is 0 (or x&0 if x has side-effects). */
3284 if (trueop0 == CONST0_RTX (mode))
3286 if (side_effects_p (op1))
3287 return simplify_gen_binary (AND, mode, op1, trueop0);
3288 return trueop0;
3290 /* x%1 is 0 (or x&0 if x has side-effects). */
3291 if (trueop1 == CONST1_RTX (mode))
3293 if (side_effects_p (op0))
3294 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3295 return CONST0_RTX (mode);
3297 /* Implement modulus by power of two as AND. */
3298 if (CONST_INT_P (trueop1)
3299 && exact_log2 (UINTVAL (trueop1)) > 0)
3300 return simplify_gen_binary (AND, mode, op0,
3301 GEN_INT (INTVAL (op1) - 1));
3302 break;
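/* Illustrative sketch (not part of the original file): assuming a
   hypothetical SImode pseudo X, unsigned modulus by a power of two is
   just a bit mask, so (umod X (const_int 8)) becomes
   (and X (const_int 7)).  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    rtx masked = simplify_gen_binary (UMOD, SImode, x, GEN_INT (8));
  }
#endif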
3304 case MOD:
3305 /* 0%x is 0 (or x&0 if x has side-effects). */
3306 if (trueop0 == CONST0_RTX (mode))
3308 if (side_effects_p (op1))
3309 return simplify_gen_binary (AND, mode, op1, trueop0);
3310 return trueop0;
3312 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3313 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3315 if (side_effects_p (op0))
3316 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3317 return CONST0_RTX (mode);
3319 break;
3321 case ROTATERT:
3322 case ROTATE:
3323 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3324 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3325 bitsize - 1, use the other rotate direction with an amount of
3326 1 .. bitsize / 2 - 1 instead. */
3327 if (CONST_INT_P (trueop1)
3328 && IN_RANGE (INTVAL (trueop1),
3329 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3330 GET_MODE_BITSIZE (mode) - 1))
3331 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3332 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3333 - INTVAL (trueop1)));
3334 /* FALLTHRU */
3335 case ASHIFTRT:
3336 if (trueop1 == CONST0_RTX (mode))
3337 return op0;
3338 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3339 return op0;
3340 /* Rotating ~0 always results in ~0. */
3341 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3342 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3343 && ! side_effects_p (op1))
3344 return op0;
3345 canonicalize_shift:
3346 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3348 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3349 if (val != INTVAL (op1))
3350 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3352 break;
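/* Illustrative sketch (not part of the original file): assuming a
   hypothetical SImode pseudo X, a rotate by more than half the mode
   precision is canonicalized to the opposite direction, so
   (rotate X (const_int 24)) becomes (rotatert X (const_int 8)).  */
#if 0
  {
    rtx x = gen_rtx_REG (SImode, 100);	/* hypothetical pseudo */
    rtx rot = simplify_gen_binary (ROTATE, SImode, x, GEN_INT (24));
  }
#endif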
3354 case ASHIFT:
3355 case SS_ASHIFT:
3356 case US_ASHIFT:
3357 if (trueop1 == CONST0_RTX (mode))
3358 return op0;
3359 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3360 return op0;
3361 goto canonicalize_shift;
3363 case LSHIFTRT:
3364 if (trueop1 == CONST0_RTX (mode))
3365 return op0;
3366 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3367 return op0;
3368 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3369 if (GET_CODE (op0) == CLZ
3370 && CONST_INT_P (trueop1)
3371 && STORE_FLAG_VALUE == 1
3372 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3374 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3375 unsigned HOST_WIDE_INT zero_val = 0;
3377 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3378 && zero_val == GET_MODE_PRECISION (imode)
3379 && INTVAL (trueop1) == exact_log2 (zero_val))
3380 return simplify_gen_relational (EQ, mode, imode,
3381 XEXP (op0, 0), const0_rtx);
3383 goto canonicalize_shift;
3385 case SMIN:
3386 if (width <= HOST_BITS_PER_WIDE_INT
3387 && mode_signbit_p (mode, trueop1)
3388 && ! side_effects_p (op0))
3389 return op1;
3390 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3391 return op0;
3392 tem = simplify_associative_operation (code, mode, op0, op1);
3393 if (tem)
3394 return tem;
3395 break;
3397 case SMAX:
3398 if (width <= HOST_BITS_PER_WIDE_INT
3399 && CONST_INT_P (trueop1)
3400 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3401 && ! side_effects_p (op0))
3402 return op1;
3403 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3404 return op0;
3405 tem = simplify_associative_operation (code, mode, op0, op1);
3406 if (tem)
3407 return tem;
3408 break;
3410 case UMIN:
3411 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3412 return op1;
3413 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3414 return op0;
3415 tem = simplify_associative_operation (code, mode, op0, op1);
3416 if (tem)
3417 return tem;
3418 break;
3420 case UMAX:
3421 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3422 return op1;
3423 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3424 return op0;
3425 tem = simplify_associative_operation (code, mode, op0, op1);
3426 if (tem)
3427 return tem;
3428 break;
3430 case SS_PLUS:
3431 case US_PLUS:
3432 case SS_MINUS:
3433 case US_MINUS:
3434 case SS_MULT:
3435 case US_MULT:
3436 case SS_DIV:
3437 case US_DIV:
3438 /* ??? There are simplifications that can be done. */
3439 return 0;
3441 case VEC_SELECT:
3442 if (!VECTOR_MODE_P (mode))
3444 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3445 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3446 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3447 gcc_assert (XVECLEN (trueop1, 0) == 1);
3448 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3450 if (GET_CODE (trueop0) == CONST_VECTOR)
3451 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3452 (trueop1, 0, 0)));
3454 /* Extract a scalar element from a nested VEC_SELECT expression
3455 (with optional nested VEC_CONCAT expression). Some targets
3456 (i386) extract a scalar element from a vector using a chain of
3457 nested VEC_SELECT expressions. When the input operand is a memory
3458 operand, this operation can be simplified to a simple scalar
3459 load from an offset memory address. */
3460 if (GET_CODE (trueop0) == VEC_SELECT)
3462 rtx op0 = XEXP (trueop0, 0);
3463 rtx op1 = XEXP (trueop0, 1);
3465 enum machine_mode opmode = GET_MODE (op0);
3466 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3467 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3469 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3470 int elem;
3472 rtvec vec;
3473 rtx tmp_op, tmp;
3475 gcc_assert (GET_CODE (op1) == PARALLEL);
3476 gcc_assert (i < n_elts);
3478 /* Select the element pointed to by the nested selector. */
3479 elem = INTVAL (XVECEXP (op1, 0, i));
3481 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3482 if (GET_CODE (op0) == VEC_CONCAT)
3484 rtx op00 = XEXP (op0, 0);
3485 rtx op01 = XEXP (op0, 1);
3487 enum machine_mode mode00, mode01;
3488 int n_elts00, n_elts01;
3490 mode00 = GET_MODE (op00);
3491 mode01 = GET_MODE (op01);
3493 /* Find out number of elements of each operand. */
3494 if (VECTOR_MODE_P (mode00))
3496 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3497 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3499 else
3500 n_elts00 = 1;
3502 if (VECTOR_MODE_P (mode01))
3504 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3505 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3507 else
3508 n_elts01 = 1;
3510 gcc_assert (n_elts == n_elts00 + n_elts01);
3512 /* Select correct operand of VEC_CONCAT
3513 and adjust selector. */
3514 if (elem < n_elts01)
3515 tmp_op = op00;
3516 else
3518 tmp_op = op01;
3519 elem -= n_elts00;
3522 else
3523 tmp_op = op0;
3525 vec = rtvec_alloc (1);
3526 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3528 tmp = gen_rtx_fmt_ee (code, mode,
3529 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3530 return tmp;
3532 if (GET_CODE (trueop0) == VEC_DUPLICATE
3533 && GET_MODE (XEXP (trueop0, 0)) == mode)
3534 return XEXP (trueop0, 0);
3536 else
3538 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3539 gcc_assert (GET_MODE_INNER (mode)
3540 == GET_MODE_INNER (GET_MODE (trueop0)));
3541 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3543 if (GET_CODE (trueop0) == CONST_VECTOR)
3545 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3546 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3547 rtvec v = rtvec_alloc (n_elts);
3548 unsigned int i;
3550 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3551 for (i = 0; i < n_elts; i++)
3553 rtx x = XVECEXP (trueop1, 0, i);
3555 gcc_assert (CONST_INT_P (x));
3556 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3557 INTVAL (x));
3560 return gen_rtx_CONST_VECTOR (mode, v);
3563 /* Recognize the identity. */
3564 if (GET_MODE (trueop0) == mode)
3566 bool maybe_ident = true;
3567 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3569 rtx j = XVECEXP (trueop1, 0, i);
3570 if (!CONST_INT_P (j) || INTVAL (j) != i)
3572 maybe_ident = false;
3573 break;
3576 if (maybe_ident)
3577 return trueop0;
3580 /* If we build {a,b} then permute it, build the result directly. */
3581 if (XVECLEN (trueop1, 0) == 2
3582 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3583 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3584 && GET_CODE (trueop0) == VEC_CONCAT
3585 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3586 && GET_MODE (XEXP (trueop0, 0)) == mode
3587 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3588 && GET_MODE (XEXP (trueop0, 1)) == mode)
3590 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3591 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3592 rtx subop0, subop1;
3594 gcc_assert (i0 < 4 && i1 < 4);
3595 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3596 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3598 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3601 if (XVECLEN (trueop1, 0) == 2
3602 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3603 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3604 && GET_CODE (trueop0) == VEC_CONCAT
3605 && GET_MODE (trueop0) == mode)
3607 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3608 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3609 rtx subop0, subop1;
3611 gcc_assert (i0 < 2 && i1 < 2);
3612 subop0 = XEXP (trueop0, i0);
3613 subop1 = XEXP (trueop0, i1);
3615 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3619 if (XVECLEN (trueop1, 0) == 1
3620 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3621 && GET_CODE (trueop0) == VEC_CONCAT)
3623 rtx vec = trueop0;
3624 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3626 /* Try to find the element in the VEC_CONCAT. */
3627 while (GET_MODE (vec) != mode
3628 && GET_CODE (vec) == VEC_CONCAT)
3630 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3631 if (offset < vec_size)
3632 vec = XEXP (vec, 0);
3633 else
3635 offset -= vec_size;
3636 vec = XEXP (vec, 1);
3638 vec = avoid_constant_pool_reference (vec);
3641 if (GET_MODE (vec) == mode)
3642 return vec;
3645 /* If we select elements in a vec_merge that all come from the same
3646 operand, select from that operand directly. */
3647 if (GET_CODE (op0) == VEC_MERGE)
3649 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3650 if (CONST_INT_P (trueop02))
3652 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3653 bool all_operand0 = true;
3654 bool all_operand1 = true;
3655 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3657 rtx j = XVECEXP (trueop1, 0, i);
3658 if (sel & (1 << UINTVAL (j)))
3659 all_operand1 = false;
3660 else
3661 all_operand0 = false;
3663 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3664 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3665 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3666 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3670 return 0;
3671 case VEC_CONCAT:
3673 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3674 ? GET_MODE (trueop0)
3675 : GET_MODE_INNER (mode));
3676 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3677 ? GET_MODE (trueop1)
3678 : GET_MODE_INNER (mode));
3680 gcc_assert (VECTOR_MODE_P (mode));
3681 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3682 == GET_MODE_SIZE (mode));
3684 if (VECTOR_MODE_P (op0_mode))
3685 gcc_assert (GET_MODE_INNER (mode)
3686 == GET_MODE_INNER (op0_mode));
3687 else
3688 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3690 if (VECTOR_MODE_P (op1_mode))
3691 gcc_assert (GET_MODE_INNER (mode)
3692 == GET_MODE_INNER (op1_mode));
3693 else
3694 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3696 if ((GET_CODE (trueop0) == CONST_VECTOR
3697 || CONST_SCALAR_INT_P (trueop0)
3698 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3699 && (GET_CODE (trueop1) == CONST_VECTOR
3700 || CONST_SCALAR_INT_P (trueop1)
3701 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3703 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3704 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3705 rtvec v = rtvec_alloc (n_elts);
3706 unsigned int i;
3707 unsigned in_n_elts = 1;
3709 if (VECTOR_MODE_P (op0_mode))
3710 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3711 for (i = 0; i < n_elts; i++)
3713 if (i < in_n_elts)
3715 if (!VECTOR_MODE_P (op0_mode))
3716 RTVEC_ELT (v, i) = trueop0;
3717 else
3718 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3720 else
3722 if (!VECTOR_MODE_P (op1_mode))
3723 RTVEC_ELT (v, i) = trueop1;
3724 else
3725 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3726 i - in_n_elts);
3730 return gen_rtx_CONST_VECTOR (mode, v);
3733 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3734 Restrict the transformation to avoid generating a VEC_SELECT with a
3735 mode unrelated to its operand. */
3736 if (GET_CODE (trueop0) == VEC_SELECT
3737 && GET_CODE (trueop1) == VEC_SELECT
3738 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3739 && GET_MODE (XEXP (trueop0, 0)) == mode)
3741 rtx par0 = XEXP (trueop0, 1);
3742 rtx par1 = XEXP (trueop1, 1);
3743 int len0 = XVECLEN (par0, 0);
3744 int len1 = XVECLEN (par1, 0);
3745 rtvec vec = rtvec_alloc (len0 + len1);
3746 for (int i = 0; i < len0; i++)
3747 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3748 for (int i = 0; i < len1; i++)
3749 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3750 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3751 gen_rtx_PARALLEL (VOIDmode, vec));
3754 return 0;
3756 default:
3757 gcc_unreachable ();
3760 return 0;
3764 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3765 rtx op0, rtx op1)
3767 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3768 HOST_WIDE_INT val;
3769 unsigned int width = GET_MODE_PRECISION (mode);
3771 if (VECTOR_MODE_P (mode)
3772 && code != VEC_CONCAT
3773 && GET_CODE (op0) == CONST_VECTOR
3774 && GET_CODE (op1) == CONST_VECTOR)
3776 unsigned n_elts = GET_MODE_NUNITS (mode);
3777 enum machine_mode op0mode = GET_MODE (op0);
3778 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3779 enum machine_mode op1mode = GET_MODE (op1);
3780 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3781 rtvec v = rtvec_alloc (n_elts);
3782 unsigned int i;
3784 gcc_assert (op0_n_elts == n_elts);
3785 gcc_assert (op1_n_elts == n_elts);
3786 for (i = 0; i < n_elts; i++)
3788 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3789 CONST_VECTOR_ELT (op0, i),
3790 CONST_VECTOR_ELT (op1, i));
3791 if (!x)
3792 return 0;
3793 RTVEC_ELT (v, i) = x;
3796 return gen_rtx_CONST_VECTOR (mode, v);
3799 if (VECTOR_MODE_P (mode)
3800 && code == VEC_CONCAT
3801 && (CONST_SCALAR_INT_P (op0)
3802 || GET_CODE (op0) == CONST_FIXED
3803 || CONST_DOUBLE_AS_FLOAT_P (op0))
3804 && (CONST_SCALAR_INT_P (op1)
3805 || CONST_DOUBLE_AS_FLOAT_P (op1)
3806 || GET_CODE (op1) == CONST_FIXED))
3808 unsigned n_elts = GET_MODE_NUNITS (mode);
3809 rtvec v = rtvec_alloc (n_elts);
3811 gcc_assert (n_elts >= 2);
3812 if (n_elts == 2)
3814 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3815 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3817 RTVEC_ELT (v, 0) = op0;
3818 RTVEC_ELT (v, 1) = op1;
3820 else
3822 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3823 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3824 unsigned i;
3826 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3827 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3828 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3830 for (i = 0; i < op0_n_elts; ++i)
3831 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3832 for (i = 0; i < op1_n_elts; ++i)
3833 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3836 return gen_rtx_CONST_VECTOR (mode, v);
3839 if (SCALAR_FLOAT_MODE_P (mode)
3840 && CONST_DOUBLE_AS_FLOAT_P (op0)
3841 && CONST_DOUBLE_AS_FLOAT_P (op1)
3842 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3844 if (code == AND
3845 || code == IOR
3846 || code == XOR)
3848 long tmp0[4];
3849 long tmp1[4];
3850 REAL_VALUE_TYPE r;
3851 int i;
3853 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3854 GET_MODE (op0));
3855 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3856 GET_MODE (op1));
3857 for (i = 0; i < 4; i++)
3859 switch (code)
3861 case AND:
3862 tmp0[i] &= tmp1[i];
3863 break;
3864 case IOR:
3865 tmp0[i] |= tmp1[i];
3866 break;
3867 case XOR:
3868 tmp0[i] ^= tmp1[i];
3869 break;
3870 default:
3871 gcc_unreachable ();
3874 real_from_target (&r, tmp0, mode);
3875 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3877 else
3879 REAL_VALUE_TYPE f0, f1, value, result;
3880 bool inexact;
3882 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3883 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3884 real_convert (&f0, mode, &f0);
3885 real_convert (&f1, mode, &f1);
3887 if (HONOR_SNANS (mode)
3888 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3889 return 0;
3891 if (code == DIV
3892 && REAL_VALUES_EQUAL (f1, dconst0)
3893 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3894 return 0;
3896 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3897 && flag_trapping_math
3898 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3900 int s0 = REAL_VALUE_NEGATIVE (f0);
3901 int s1 = REAL_VALUE_NEGATIVE (f1);
3903 switch (code)
3905 case PLUS:
3906 /* Inf + -Inf = NaN plus exception. */
3907 if (s0 != s1)
3908 return 0;
3909 break;
3910 case MINUS:
3911 /* Inf - Inf = NaN plus exception. */
3912 if (s0 == s1)
3913 return 0;
3914 break;
3915 case DIV:
3916 /* Inf / Inf = NaN plus exception. */
3917 return 0;
3918 default:
3919 break;
3923 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3924 && flag_trapping_math
3925 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3926 || (REAL_VALUE_ISINF (f1)
3927 && REAL_VALUES_EQUAL (f0, dconst0))))
3928 /* Inf * 0 = NaN plus exception. */
3929 return 0;
3931 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3932 &f0, &f1);
3933 real_convert (&result, mode, &value);
3935 /* Don't constant fold this floating point operation if
3936 the result has overflowed and flag_trapping_math is set. */
3938 if (flag_trapping_math
3939 && MODE_HAS_INFINITIES (mode)
3940 && REAL_VALUE_ISINF (result)
3941 && !REAL_VALUE_ISINF (f0)
3942 && !REAL_VALUE_ISINF (f1))
3943 /* Overflow plus exception. */
3944 return 0;
3946 /* Don't constant fold this floating point operation if the
3947 result may depend upon the run-time rounding mode and
3948 flag_rounding_math is set, or if GCC's software emulation
3949 is unable to accurately represent the result. */
3951 if ((flag_rounding_math
3952 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3953 && (inexact || !real_identical (&result, &value)))
3954 return NULL_RTX;
3956 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3960 /* We can fold some multi-word operations. */
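/* Both operands are known to fit in a double_int (a pair of HOST_WIDE_INTs),
   so the arithmetic below is carried out with double_int operations and the
   result re-encoded with immed_double_int_const.  */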
3961 if (GET_MODE_CLASS (mode) == MODE_INT
3962 && width == HOST_BITS_PER_DOUBLE_INT
3963 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3964 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3966 double_int o0, o1, res, tmp;
3967 bool overflow;
3969 o0 = rtx_to_double_int (op0);
3970 o1 = rtx_to_double_int (op1);
3972 switch (code)
3974 case MINUS:
3975 /* A - B == A + (-B). */
3976 o1 = -o1;
3978 /* Fall through.... */
3980 case PLUS:
3981 res = o0 + o1;
3982 break;
3984 case MULT:
3985 res = o0 * o1;
3986 break;
3988 case DIV:
3989 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3990 &tmp, &overflow);
3991 if (overflow)
3992 return 0;
3993 break;
3995 case MOD:
3996 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3997 &res, &overflow);
3998 if (overflow)
3999 return 0;
4000 break;
4002 case UDIV:
4003 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4004 &tmp, &overflow);
4005 if (overflow)
4006 return 0;
4007 break;
4009 case UMOD:
4010 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4011 &res, &overflow);
4012 if (overflow)
4013 return 0;
4014 break;
4016 case AND:
4017 res = o0 & o1;
4018 break;
4020 case IOR:
4021 res = o0 | o1;
4022 break;
4024 case XOR:
4025 res = o0 ^ o1;
4026 break;
4028 case SMIN:
4029 res = o0.smin (o1);
4030 break;
4032 case SMAX:
4033 res = o0.smax (o1);
4034 break;
4036 case UMIN:
4037 res = o0.umin (o1);
4038 break;
4040 case UMAX:
4041 res = o0.umax (o1);
4042 break;
4044 case LSHIFTRT: case ASHIFTRT:
4045 case ASHIFT:
4046 case ROTATE: case ROTATERT:
4048 unsigned HOST_WIDE_INT cnt;
4050 if (SHIFT_COUNT_TRUNCATED)
4052 o1.high = 0;
4053 o1.low &= GET_MODE_PRECISION (mode) - 1;
4056 if (!o1.fits_uhwi ()
4057 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4058 return 0;
4060 cnt = o1.to_uhwi ();
4061 unsigned short prec = GET_MODE_PRECISION (mode);
4063 if (code == LSHIFTRT || code == ASHIFTRT)
4064 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4065 else if (code == ASHIFT)
4066 res = o0.alshift (cnt, prec);
4067 else if (code == ROTATE)
4068 res = o0.lrotate (cnt, prec);
4069 else /* code == ROTATERT */
4070 res = o0.rrotate (cnt, prec);
4072 break;
4074 default:
4075 return 0;
4078 return immed_double_int_const (res, mode);
4081 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4082 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4084 /* Get the integer argument values in two forms:
4085 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4087 arg0 = INTVAL (op0);
4088 arg1 = INTVAL (op1);
4090 if (width < HOST_BITS_PER_WIDE_INT)
4092 arg0 &= GET_MODE_MASK (mode);
4093 arg1 &= GET_MODE_MASK (mode);
4095 arg0s = arg0;
4096 if (val_signbit_known_set_p (mode, arg0s))
4097 arg0s |= ~GET_MODE_MASK (mode);
4099 arg1s = arg1;
4100 if (val_signbit_known_set_p (mode, arg1s))
4101 arg1s |= ~GET_MODE_MASK (mode);
4103 else
4105 arg0s = arg0;
4106 arg1s = arg1;
4109 /* Compute the value of the arithmetic. */
4111 switch (code)
4113 case PLUS:
4114 val = arg0s + arg1s;
4115 break;
4117 case MINUS:
4118 val = arg0s - arg1s;
4119 break;
4121 case MULT:
4122 val = arg0s * arg1s;
4123 break;
4125 case DIV:
4126 if (arg1s == 0
4127 || ((unsigned HOST_WIDE_INT) arg0s
4128 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4129 && arg1s == -1))
4130 return 0;
4131 val = arg0s / arg1s;
4132 break;
4134 case MOD:
4135 if (arg1s == 0
4136 || ((unsigned HOST_WIDE_INT) arg0s
4137 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4138 && arg1s == -1))
4139 return 0;
4140 val = arg0s % arg1s;
4141 break;
4143 case UDIV:
4144 if (arg1 == 0
4145 || ((unsigned HOST_WIDE_INT) arg0s
4146 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4147 && arg1s == -1))
4148 return 0;
4149 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4150 break;
4152 case UMOD:
4153 if (arg1 == 0
4154 || ((unsigned HOST_WIDE_INT) arg0s
4155 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4156 && arg1s == -1))
4157 return 0;
4158 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4159 break;
4161 case AND:
4162 val = arg0 & arg1;
4163 break;
4165 case IOR:
4166 val = arg0 | arg1;
4167 break;
4169 case XOR:
4170 val = arg0 ^ arg1;
4171 break;
4173 case LSHIFTRT:
4174 case ASHIFT:
4175 case ASHIFTRT:
4176 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4177 the value is in range. We can't return any old value for
4178 out-of-range arguments because either the middle-end (via
4179 shift_truncation_mask) or the back-end might be relying on
4180 target-specific knowledge. Nor can we rely on
4181 shift_truncation_mask, since the shift might not be part of an
4182 ashlM3, lshrM3 or ashrM3 instruction. */
4183 if (SHIFT_COUNT_TRUNCATED)
4184 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4185 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4186 return 0;
4188 val = (code == ASHIFT
4189 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4190 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4192 /* Sign-extend the result for arithmetic right shifts. */
4193 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4194 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4195 break;
4197 case ROTATERT:
4198 if (arg1 < 0)
4199 return 0;
4201 arg1 %= width;
4202 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4203 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4204 break;
4206 case ROTATE:
4207 if (arg1 < 0)
4208 return 0;
4210 arg1 %= width;
4211 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4212 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4213 break;
4215 case COMPARE:
4216 /* Do nothing here. */
4217 return 0;
4219 case SMIN:
4220 val = arg0s <= arg1s ? arg0s : arg1s;
4221 break;
4223 case UMIN:
4224 val = ((unsigned HOST_WIDE_INT) arg0
4225 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4226 break;
4228 case SMAX:
4229 val = arg0s > arg1s ? arg0s : arg1s;
4230 break;
4232 case UMAX:
4233 val = ((unsigned HOST_WIDE_INT) arg0
4234 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4235 break;
4237 case SS_PLUS:
4238 case US_PLUS:
4239 case SS_MINUS:
4240 case US_MINUS:
4241 case SS_MULT:
4242 case US_MULT:
4243 case SS_DIV:
4244 case US_DIV:
4245 case SS_ASHIFT:
4246 case US_ASHIFT:
4247 /* ??? There are simplifications that can be done. */
4248 return 0;
4250 default:
4251 gcc_unreachable ();
4254 return gen_int_mode (val, mode);
4257 return NULL_RTX;
4262 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4263 PLUS or MINUS.
4265 Rather than test for specific cases, we do this by a brute-force method
4266 and do all possible simplifications until no more changes occur. Then
4267 we rebuild the operation. */
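/* For example, (plus (plus X (const_int 4)) (const_int -4)) is first
   decomposed into the operand list { X, 4, -4 }; the pairwise combination
   loop below folds 4 and -4 into 0 and then X plus 0 into X, so the whole
   expression simplifies to X.  */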
4269 struct simplify_plus_minus_op_data
4271 rtx op;
4272 short neg;
4275 static bool
4276 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4278 int result;
4280 result = (commutative_operand_precedence (y)
4281 - commutative_operand_precedence (x));
4282 if (result)
4283 return result > 0;
4285 /* Group together equal REGs to do more simplification. */
4286 if (REG_P (x) && REG_P (y))
4287 return REGNO (x) > REGNO (y);
4288 else
4289 return false;
4292 static rtx
4293 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4294 rtx op1)
4296 struct simplify_plus_minus_op_data ops[8];
4297 rtx result, tem;
4298 int n_ops = 2, input_ops = 2;
4299 int changed, n_constants = 0, canonicalized = 0;
4300 int i, j;
4302 memset (ops, 0, sizeof ops);
4304 /* Set up the two operands and then expand them until nothing has been
4305 changed. If we run out of room in our array, give up; this should
4306 almost never happen. */
4308 ops[0].op = op0;
4309 ops[0].neg = 0;
4310 ops[1].op = op1;
4311 ops[1].neg = (code == MINUS);
4315 changed = 0;
4317 for (i = 0; i < n_ops; i++)
4319 rtx this_op = ops[i].op;
4320 int this_neg = ops[i].neg;
4321 enum rtx_code this_code = GET_CODE (this_op);
4323 switch (this_code)
4325 case PLUS:
4326 case MINUS:
4327 if (n_ops == 7)
4328 return NULL_RTX;
4330 ops[n_ops].op = XEXP (this_op, 1);
4331 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4332 n_ops++;
4334 ops[i].op = XEXP (this_op, 0);
4335 input_ops++;
4336 changed = 1;
4337 canonicalized |= this_neg;
4338 break;
4340 case NEG:
4341 ops[i].op = XEXP (this_op, 0);
4342 ops[i].neg = ! this_neg;
4343 changed = 1;
4344 canonicalized = 1;
4345 break;
4347 case CONST:
4348 if (n_ops < 7
4349 && GET_CODE (XEXP (this_op, 0)) == PLUS
4350 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4351 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4353 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4354 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4355 ops[n_ops].neg = this_neg;
4356 n_ops++;
4357 changed = 1;
4358 canonicalized = 1;
4360 break;
4362 case NOT:
4363 /* ~a -> (-a - 1) */
4364 if (n_ops != 7)
4366 ops[n_ops].op = CONSTM1_RTX (mode);
4367 ops[n_ops++].neg = this_neg;
4368 ops[i].op = XEXP (this_op, 0);
4369 ops[i].neg = !this_neg;
4370 changed = 1;
4371 canonicalized = 1;
4373 break;
4375 case CONST_INT:
4376 n_constants++;
4377 if (this_neg)
4379 ops[i].op = neg_const_int (mode, this_op);
4380 ops[i].neg = 0;
4381 changed = 1;
4382 canonicalized = 1;
4384 break;
4386 default:
4387 break;
4391 while (changed);
4393 if (n_constants > 1)
4394 canonicalized = 1;
4396 gcc_assert (n_ops >= 2);
4398 /* If we only have two operands, we can avoid the loops. */
4399 if (n_ops == 2)
4401 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4402 rtx lhs, rhs;
4404 /* Get the two operands. Be careful with the order, especially for
4405 the cases where code == MINUS. */
4406 if (ops[0].neg && ops[1].neg)
4408 lhs = gen_rtx_NEG (mode, ops[0].op);
4409 rhs = ops[1].op;
4411 else if (ops[0].neg)
4413 lhs = ops[1].op;
4414 rhs = ops[0].op;
4416 else
4418 lhs = ops[0].op;
4419 rhs = ops[1].op;
4422 return simplify_const_binary_operation (code, mode, lhs, rhs);
4425 /* Now simplify each pair of operands until nothing changes. */
4428 /* Insertion sort is good enough for an eight-element array. */
4429 for (i = 1; i < n_ops; i++)
4431 struct simplify_plus_minus_op_data save;
4432 j = i - 1;
4433 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4434 continue;
4436 canonicalized = 1;
4437 save = ops[i];
4439 ops[j + 1] = ops[j];
4440 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4441 ops[j + 1] = save;
4444 changed = 0;
4445 for (i = n_ops - 1; i > 0; i--)
4446 for (j = i - 1; j >= 0; j--)
4448 rtx lhs = ops[j].op, rhs = ops[i].op;
4449 int lneg = ops[j].neg, rneg = ops[i].neg;
4451 if (lhs != 0 && rhs != 0)
4453 enum rtx_code ncode = PLUS;
4455 if (lneg != rneg)
4457 ncode = MINUS;
4458 if (lneg)
4459 tem = lhs, lhs = rhs, rhs = tem;
4461 else if (swap_commutative_operands_p (lhs, rhs))
4462 tem = lhs, lhs = rhs, rhs = tem;
4464 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4465 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4467 rtx tem_lhs, tem_rhs;
4469 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4470 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4471 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4473 if (tem && !CONSTANT_P (tem))
4474 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4476 else
4477 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4479 /* Reject "simplifications" that just wrap the two
4480 arguments in a CONST. Failure to do so can result
4481 in infinite recursion with simplify_binary_operation
4482 when it calls us to simplify CONST operations. */
4483 if (tem
4484 && ! (GET_CODE (tem) == CONST
4485 && GET_CODE (XEXP (tem, 0)) == ncode
4486 && XEXP (XEXP (tem, 0), 0) == lhs
4487 && XEXP (XEXP (tem, 0), 1) == rhs))
4489 lneg &= rneg;
4490 if (GET_CODE (tem) == NEG)
4491 tem = XEXP (tem, 0), lneg = !lneg;
4492 if (CONST_INT_P (tem) && lneg)
4493 tem = neg_const_int (mode, tem), lneg = 0;
4495 ops[i].op = tem;
4496 ops[i].neg = lneg;
4497 ops[j].op = NULL_RTX;
4498 changed = 1;
4499 canonicalized = 1;
4504 /* If nothing changed, fail. */
4505 if (!canonicalized)
4506 return NULL_RTX;
4508 /* Pack all the operands to the lower-numbered entries. */
4509 for (i = 0, j = 0; j < n_ops; j++)
4510 if (ops[j].op)
4512 ops[i] = ops[j];
4513 i++;
4515 n_ops = i;
4517 while (changed);
4519 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4520 if (n_ops == 2
4521 && CONST_INT_P (ops[1].op)
4522 && CONSTANT_P (ops[0].op)
4523 && ops[0].neg)
4524 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4526 /* We suppressed creation of trivial CONST expressions in the
4527 combination loop to avoid recursion. Create one manually now.
4528 The combination loop should have ensured that there is exactly
4529 one CONST_INT, and the sort will have ensured that it is last
4530 in the array and that any other constant will be next-to-last. */
4532 if (n_ops > 1
4533 && CONST_INT_P (ops[n_ops - 1].op)
4534 && CONSTANT_P (ops[n_ops - 2].op))
4536 rtx value = ops[n_ops - 1].op;
4537 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4538 value = neg_const_int (mode, value);
4539 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4540 INTVAL (value));
4541 n_ops--;
4544 /* Put a non-negated operand first, if possible. */
4546 for (i = 0; i < n_ops && ops[i].neg; i++)
4547 continue;
4548 if (i == n_ops)
4549 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4550 else if (i != 0)
4552 tem = ops[0].op;
4553 ops[0] = ops[i];
4554 ops[i].op = tem;
4555 ops[i].neg = 1;
4558 /* Now make the result by performing the requested operations. */
4559 result = ops[0].op;
4560 for (i = 1; i < n_ops; i++)
4561 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4562 mode, result, ops[i].op);
4564 return result;
4567 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4568 static bool
4569 plus_minus_operand_p (const_rtx x)
4571 return GET_CODE (x) == PLUS
4572 || GET_CODE (x) == MINUS
4573 || (GET_CODE (x) == CONST
4574 && GET_CODE (XEXP (x, 0)) == PLUS
4575 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4576 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4579 /* Like simplify_binary_operation except used for relational operators.
4580 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4581 not both be VOIDmode as well.
4583 CMP_MODE specifies the mode in which the comparison is done, so it is
4584 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4585 the operands or, if both are VOIDmode, the operands are compared in
4586 "infinite precision". */
4588 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4589 enum machine_mode cmp_mode, rtx op0, rtx op1)
4591 rtx tem, trueop0, trueop1;
4593 if (cmp_mode == VOIDmode)
4594 cmp_mode = GET_MODE (op0);
4595 if (cmp_mode == VOIDmode)
4596 cmp_mode = GET_MODE (op1);
4598 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4599 if (tem)
4601 if (SCALAR_FLOAT_MODE_P (mode))
4603 if (tem == const0_rtx)
4604 return CONST0_RTX (mode);
4605 #ifdef FLOAT_STORE_FLAG_VALUE
4607 REAL_VALUE_TYPE val;
4608 val = FLOAT_STORE_FLAG_VALUE (mode);
4609 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4611 #else
4612 return NULL_RTX;
4613 #endif
4615 if (VECTOR_MODE_P (mode))
4617 if (tem == const0_rtx)
4618 return CONST0_RTX (mode);
4619 #ifdef VECTOR_STORE_FLAG_VALUE
4621 int i, units;
4622 rtvec v;
4624 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4625 if (val == NULL_RTX)
4626 return NULL_RTX;
4627 if (val == const1_rtx)
4628 return CONST1_RTX (mode);
4630 units = GET_MODE_NUNITS (mode);
4631 v = rtvec_alloc (units);
4632 for (i = 0; i < units; i++)
4633 RTVEC_ELT (v, i) = val;
4634 return gen_rtx_raw_CONST_VECTOR (mode, v);
4636 #else
4637 return NULL_RTX;
4638 #endif
4641 return tem;
4644 /* For the following tests, ensure const0_rtx is op1. */
4645 if (swap_commutative_operands_p (op0, op1)
4646 || (op0 == const0_rtx && op1 != const0_rtx))
4647 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4649 /* If op0 is a compare, extract the comparison arguments from it. */
4650 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4651 return simplify_gen_relational (code, mode, VOIDmode,
4652 XEXP (op0, 0), XEXP (op0, 1));
4654 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4655 || CC0_P (op0))
4656 return NULL_RTX;
4658 trueop0 = avoid_constant_pool_reference (op0);
4659 trueop1 = avoid_constant_pool_reference (op1);
4660 return simplify_relational_operation_1 (code, mode, cmp_mode,
4661 trueop0, trueop1);
4664 /* This part of simplify_relational_operation is only used when CMP_MODE
4665 is not in class MODE_CC (i.e. it is a real comparison).
4667 MODE is the mode of the result, while CMP_MODE specifies the mode in
4668 which the comparison is done, so it is the mode of the operands. */
4670 static rtx
4671 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4672 enum machine_mode cmp_mode, rtx op0, rtx op1)
4674 enum rtx_code op0code = GET_CODE (op0);
4676 if (op1 == const0_rtx && COMPARISON_P (op0))
4678 /* If op0 is a comparison, extract the comparison arguments
4679 from it. */
4680 if (code == NE)
4682 if (GET_MODE (op0) == mode)
4683 return simplify_rtx (op0);
4684 else
4685 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4686 XEXP (op0, 0), XEXP (op0, 1));
4688 else if (code == EQ)
4690 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4691 if (new_code != UNKNOWN)
4692 return simplify_gen_relational (new_code, mode, VOIDmode,
4693 XEXP (op0, 0), XEXP (op0, 1));
4697 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4698 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4699 if ((code == LTU || code == GEU)
4700 && GET_CODE (op0) == PLUS
4701 && CONST_INT_P (XEXP (op0, 1))
4702 && (rtx_equal_p (op1, XEXP (op0, 0))
4703 || rtx_equal_p (op1, XEXP (op0, 1)))
4704 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4705 && XEXP (op0, 1) != const0_rtx)
4707 rtx new_cmp
4708 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4709 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4710 cmp_mode, XEXP (op0, 0), new_cmp);
4713 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4714 if ((code == LTU || code == GEU)
4715 && GET_CODE (op0) == PLUS
4716 && rtx_equal_p (op1, XEXP (op0, 1))
4717 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4718 && !rtx_equal_p (op1, XEXP (op0, 0)))
4719 return simplify_gen_relational (code, mode, cmp_mode, op0,
4720 copy_rtx (XEXP (op0, 0)));
4722 if (op1 == const0_rtx)
4724 /* Canonicalize (GTU x 0) as (NE x 0). */
4725 if (code == GTU)
4726 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4727 /* Canonicalize (LEU x 0) as (EQ x 0). */
4728 if (code == LEU)
4729 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4731 else if (op1 == const1_rtx)
4733 switch (code)
4735 case GE:
4736 /* Canonicalize (GE x 1) as (GT x 0). */
4737 return simplify_gen_relational (GT, mode, cmp_mode,
4738 op0, const0_rtx);
4739 case GEU:
4740 /* Canonicalize (GEU x 1) as (NE x 0). */
4741 return simplify_gen_relational (NE, mode, cmp_mode,
4742 op0, const0_rtx);
4743 case LT:
4744 /* Canonicalize (LT x 1) as (LE x 0). */
4745 return simplify_gen_relational (LE, mode, cmp_mode,
4746 op0, const0_rtx);
4747 case LTU:
4748 /* Canonicalize (LTU x 1) as (EQ x 0). */
4749 return simplify_gen_relational (EQ, mode, cmp_mode,
4750 op0, const0_rtx);
4751 default:
4752 break;
4755 else if (op1 == constm1_rtx)
4757 /* Canonicalize (LE x -1) as (LT x 0). */
4758 if (code == LE)
4759 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4760 /* Canonicalize (GT x -1) as (GE x 0). */
4761 if (code == GT)
4762 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4765 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4766 if ((code == EQ || code == NE)
4767 && (op0code == PLUS || op0code == MINUS)
4768 && CONSTANT_P (op1)
4769 && CONSTANT_P (XEXP (op0, 1))
4770 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4772 rtx x = XEXP (op0, 0);
4773 rtx c = XEXP (op0, 1);
4774 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4775 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4777 /* Detect an infinite recursive condition, where we oscillate at this
4778 simplification case between:
4779 A + B == C <---> C - B == A,
4780 where A, B, and C are all constants with non-simplifiable expressions,
4781 usually SYMBOL_REFs. */
4782 if (GET_CODE (tem) == invcode
4783 && CONSTANT_P (x)
4784 && rtx_equal_p (c, XEXP (tem, 1)))
4785 return NULL_RTX;
4787 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4790 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4791 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4792 if (code == NE
4793 && op1 == const0_rtx
4794 && GET_MODE_CLASS (mode) == MODE_INT
4795 && cmp_mode != VOIDmode
4796 /* ??? Work-around BImode bugs in the ia64 backend. */
4797 && mode != BImode
4798 && cmp_mode != BImode
4799 && nonzero_bits (op0, cmp_mode) == 1
4800 && STORE_FLAG_VALUE == 1)
4801 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4802 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4803 : lowpart_subreg (mode, op0, cmp_mode);
4805 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4806 if ((code == EQ || code == NE)
4807 && op1 == const0_rtx
4808 && op0code == XOR)
4809 return simplify_gen_relational (code, mode, cmp_mode,
4810 XEXP (op0, 0), XEXP (op0, 1));
4812 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4813 if ((code == EQ || code == NE)
4814 && op0code == XOR
4815 && rtx_equal_p (XEXP (op0, 0), op1)
4816 && !side_effects_p (XEXP (op0, 0)))
4817 return simplify_gen_relational (code, mode, cmp_mode,
4818 XEXP (op0, 1), const0_rtx);
4820 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4821 if ((code == EQ || code == NE)
4822 && op0code == XOR
4823 && rtx_equal_p (XEXP (op0, 1), op1)
4824 && !side_effects_p (XEXP (op0, 1)))
4825 return simplify_gen_relational (code, mode, cmp_mode,
4826 XEXP (op0, 0), const0_rtx);
4828 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4829 if ((code == EQ || code == NE)
4830 && op0code == XOR
4831 && CONST_SCALAR_INT_P (op1)
4832 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4833 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4834 simplify_gen_binary (XOR, cmp_mode,
4835 XEXP (op0, 1), op1));
4837 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4838 if ((code == EQ || code == NE)
4839 && GET_CODE (op0) == BSWAP
4840 && CONST_SCALAR_INT_P (op1))
4841 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4842 simplify_gen_unary (BSWAP, cmp_mode,
4843 op1, cmp_mode));
4845 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4846 if ((code == EQ || code == NE)
4847 && GET_CODE (op0) == BSWAP
4848 && GET_CODE (op1) == BSWAP)
4849 return simplify_gen_relational (code, mode, cmp_mode,
4850 XEXP (op0, 0), XEXP (op1, 0));
4852 if (op0code == POPCOUNT && op1 == const0_rtx)
4853 switch (code)
4855 case EQ:
4856 case LE:
4857 case LEU:
4858 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4859 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4860 XEXP (op0, 0), const0_rtx);
4862 case NE:
4863 case GT:
4864 case GTU:
4865 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4866 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4867 XEXP (op0, 0), const0_rtx);
4869 default:
4870 break;
4873 return NULL_RTX;
4876 enum
4878 CMP_EQ = 1,
4879 CMP_LT = 2,
4880 CMP_GT = 4,
4881 CMP_LTU = 8,
4882 CMP_GTU = 16
4886 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4887 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4888 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4889 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4890 For floating-point comparisons, assume that the operands were ordered. */
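/* For instance, with KNOWN_RESULTS == (CMP_LT | CMP_LTU), the codes LT, LE,
   LTU, LEU and NE all map to const_true_rtx, while EQ, GT, GE, GTU and GEU
   map to const0_rtx.  */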
4892 static rtx
4893 comparison_result (enum rtx_code code, int known_results)
4895 switch (code)
4897 case EQ:
4898 case UNEQ:
4899 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4900 case NE:
4901 case LTGT:
4902 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4904 case LT:
4905 case UNLT:
4906 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4907 case GE:
4908 case UNGE:
4909 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4911 case GT:
4912 case UNGT:
4913 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4914 case LE:
4915 case UNLE:
4916 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4918 case LTU:
4919 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4920 case GEU:
4921 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4923 case GTU:
4924 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4925 case LEU:
4926 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4928 case ORDERED:
4929 return const_true_rtx;
4930 case UNORDERED:
4931 return const0_rtx;
4932 default:
4933 gcc_unreachable ();
4937 /* Check if the given comparison (done in the given MODE) is actually a
4938 tautology or a contradiction.
4939 If no simplification is possible, this function returns zero.
4940 Otherwise, it returns either const_true_rtx or const0_rtx. */
4943 simplify_const_relational_operation (enum rtx_code code,
4944 enum machine_mode mode,
4945 rtx op0, rtx op1)
4947 rtx tem;
4948 rtx trueop0;
4949 rtx trueop1;
4951 gcc_assert (mode != VOIDmode
4952 || (GET_MODE (op0) == VOIDmode
4953 && GET_MODE (op1) == VOIDmode));
4955 /* If op0 is a compare, extract the comparison arguments from it. */
4956 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4958 op1 = XEXP (op0, 1);
4959 op0 = XEXP (op0, 0);
4961 if (GET_MODE (op0) != VOIDmode)
4962 mode = GET_MODE (op0);
4963 else if (GET_MODE (op1) != VOIDmode)
4964 mode = GET_MODE (op1);
4965 else
4966 return 0;
4969 /* We can't simplify MODE_CC values since we don't know what the
4970 actual comparison is. */
4971 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4972 return 0;
4974 /* Make sure the constant is second. */
4975 if (swap_commutative_operands_p (op0, op1))
4977 tem = op0, op0 = op1, op1 = tem;
4978 code = swap_condition (code);
4981 trueop0 = avoid_constant_pool_reference (op0);
4982 trueop1 = avoid_constant_pool_reference (op1);
4984 /* For integer comparisons of A and B maybe we can simplify A - B and can
4985 then simplify a comparison of that with zero. If A and B are both either
4986 a register or a CONST_INT, this can't help; testing for these cases will
4987 prevent infinite recursion here and speed things up.
4989 We can only do this for EQ and NE comparisons, as otherwise we may
4990 lose or introduce overflow, which we cannot disregard as undefined
4991 because we do not know the signedness of the operation on either the
4992 left or the right hand side of the comparison. */
4994 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4995 && (code == EQ || code == NE)
4996 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4997 && (REG_P (op1) || CONST_INT_P (trueop1)))
4998 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4999 /* We cannot do this if tem is a nonzero address. */
5000 && ! nonzero_address_p (tem))
5001 return simplify_const_relational_operation (signed_condition (code),
5002 mode, tem, const0_rtx);
5004 if (! HONOR_NANS (mode) && code == ORDERED)
5005 return const_true_rtx;
5007 if (! HONOR_NANS (mode) && code == UNORDERED)
5008 return const0_rtx;
5010 /* For modes without NaNs, if the two operands are equal, we know the
5011 result except if they have side-effects. Even with NaNs we know
5012 the result of unordered comparisons and, if signaling NaNs are
5013 irrelevant, also the result of LT/GT/LTGT. */
5014 if ((! HONOR_NANS (GET_MODE (trueop0))
5015 || code == UNEQ || code == UNLE || code == UNGE
5016 || ((code == LT || code == GT || code == LTGT)
5017 && ! HONOR_SNANS (GET_MODE (trueop0))))
5018 && rtx_equal_p (trueop0, trueop1)
5019 && ! side_effects_p (trueop0))
5020 return comparison_result (code, CMP_EQ);
5022 /* If the operands are floating-point constants, see if we can fold
5023 the result. */
5024 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5025 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5026 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5028 REAL_VALUE_TYPE d0, d1;
5030 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5031 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5033 /* Comparisons are unordered iff at least one of the values is NaN. */
5034 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5035 switch (code)
5037 case UNEQ:
5038 case UNLT:
5039 case UNGT:
5040 case UNLE:
5041 case UNGE:
5042 case NE:
5043 case UNORDERED:
5044 return const_true_rtx;
5045 case EQ:
5046 case LT:
5047 case GT:
5048 case LE:
5049 case GE:
5050 case LTGT:
5051 case ORDERED:
5052 return const0_rtx;
5053 default:
5054 return 0;
5057 return comparison_result (code,
5058 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5059 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5062 /* Otherwise, see if the operands are both integers. */
5063 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5064 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5065 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5067 int width = GET_MODE_PRECISION (mode);
5068 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5069 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5071 /* Get the two words comprising each integer constant. */
5072 if (CONST_DOUBLE_AS_INT_P (trueop0))
5074 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5075 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5077 else
5079 l0u = l0s = INTVAL (trueop0);
5080 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5083 if (CONST_DOUBLE_AS_INT_P (trueop1))
5085 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5086 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5088 else
5090 l1u = l1s = INTVAL (trueop1);
5091 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5094 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5095 we have to sign or zero-extend the values. */
5096 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5098 l0u &= GET_MODE_MASK (mode);
5099 l1u &= GET_MODE_MASK (mode);
5101 if (val_signbit_known_set_p (mode, l0s))
5102 l0s |= ~GET_MODE_MASK (mode);
5104 if (val_signbit_known_set_p (mode, l1s))
5105 l1s |= ~GET_MODE_MASK (mode);
5107 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5108 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5110 if (h0u == h1u && l0u == l1u)
5111 return comparison_result (code, CMP_EQ);
5112 else
5114 int cr;
5115 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5116 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5117 return comparison_result (code, cr);
5121 /* Optimize comparisons with upper and lower bounds. */
5122 if (HWI_COMPUTABLE_MODE_P (mode)
5123 && CONST_INT_P (trueop1))
5125 int sign;
5126 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5127 HOST_WIDE_INT val = INTVAL (trueop1);
5128 HOST_WIDE_INT mmin, mmax;
5130 if (code == GEU
5131 || code == LEU
5132 || code == GTU
5133 || code == LTU)
5134 sign = 0;
5135 else
5136 sign = 1;
5138 /* Get a reduced range if the sign bit is zero. */
5139 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5141 mmin = 0;
5142 mmax = nonzero;
5144 else
5146 rtx mmin_rtx, mmax_rtx;
5147 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5149 mmin = INTVAL (mmin_rtx);
5150 mmax = INTVAL (mmax_rtx);
5151 if (sign)
5153 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5155 mmin >>= (sign_copies - 1);
5156 mmax >>= (sign_copies - 1);
5160 switch (code)
5162 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5163 case GEU:
5164 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5165 return const_true_rtx;
5166 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5167 return const0_rtx;
5168 break;
5169 case GE:
5170 if (val <= mmin)
5171 return const_true_rtx;
5172 if (val > mmax)
5173 return const0_rtx;
5174 break;
5176 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5177 case LEU:
5178 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5179 return const_true_rtx;
5180 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5181 return const0_rtx;
5182 break;
5183 case LE:
5184 if (val >= mmax)
5185 return const_true_rtx;
5186 if (val < mmin)
5187 return const0_rtx;
5188 break;
5190 case EQ:
5191 /* x == y is always false for y out of range. */
5192 if (val < mmin || val > mmax)
5193 return const0_rtx;
5194 break;
5196 /* x > y is always false for y >= mmax, always true for y < mmin. */
5197 case GTU:
5198 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5199 return const0_rtx;
5200 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5201 return const_true_rtx;
5202 break;
5203 case GT:
5204 if (val >= mmax)
5205 return const0_rtx;
5206 if (val < mmin)
5207 return const_true_rtx;
5208 break;
5210 /* x < y is always false for y <= mmin, always true for y > mmax. */
5211 case LTU:
5212 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5213 return const0_rtx;
5214 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5215 return const_true_rtx;
5216 break;
5217 case LT:
5218 if (val <= mmin)
5219 return const0_rtx;
5220 if (val > mmax)
5221 return const_true_rtx;
5222 break;
5224 case NE:
5225 /* x != y is always true for y out of range. */
5226 if (val < mmin || val > mmax)
5227 return const_true_rtx;
5228 break;
5230 default:
5231 break;
5235 /* Optimize integer comparisons with zero. */
5236 if (trueop1 == const0_rtx)
5238 /* Some addresses are known to be nonzero. We don't know
5239 their sign, but equality comparisons are known. */
5240 if (nonzero_address_p (trueop0))
5242 if (code == EQ || code == LEU)
5243 return const0_rtx;
5244 if (code == NE || code == GTU)
5245 return const_true_rtx;
5248 /* See if the first operand is an IOR with a constant. If so, we
5249 may be able to determine the result of this comparison. */
5250 if (GET_CODE (op0) == IOR)
5252 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5253 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5255 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5256 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5257 && (UINTVAL (inner_const)
5258 & ((unsigned HOST_WIDE_INT) 1
5259 << sign_bitnum)));
5261 switch (code)
5263 case EQ:
5264 case LEU:
5265 return const0_rtx;
5266 case NE:
5267 case GTU:
5268 return const_true_rtx;
5269 case LT:
5270 case LE:
5271 if (has_sign)
5272 return const_true_rtx;
5273 break;
5274 case GT:
5275 case GE:
5276 if (has_sign)
5277 return const0_rtx;
5278 break;
5279 default:
5280 break;
5286 /* Optimize comparison of ABS with zero. */
5287 if (trueop1 == CONST0_RTX (mode)
5288 && (GET_CODE (trueop0) == ABS
5289 || (GET_CODE (trueop0) == FLOAT_EXTEND
5290 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5292 switch (code)
5294 case LT:
5295 /* Optimize abs(x) < 0.0. */
5296 if (!HONOR_SNANS (mode)
5297 && (!INTEGRAL_MODE_P (mode)
5298 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5300 if (INTEGRAL_MODE_P (mode)
5301 && (issue_strict_overflow_warning
5302 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5303 warning (OPT_Wstrict_overflow,
5304 ("assuming signed overflow does not occur when "
5305 "assuming abs (x) < 0 is false"));
5306 return const0_rtx;
5308 break;
5310 case GE:
5311 /* Optimize abs(x) >= 0.0. */
5312 if (!HONOR_NANS (mode)
5313 && (!INTEGRAL_MODE_P (mode)
5314 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5316 if (INTEGRAL_MODE_P (mode)
5317 && (issue_strict_overflow_warning
5318 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5319 warning (OPT_Wstrict_overflow,
5320 ("assuming signed overflow does not occur when "
5321 "assuming abs (x) >= 0 is true"));
5322 return const_true_rtx;
5324 break;
5326 case UNGE:
5327 /* Optimize ! (abs(x) < 0.0). */
5328 return const_true_rtx;
5330 default:
5331 break;
5335 return 0;
5338 /* Simplify CODE, an operation with result mode MODE and three operands,
5339 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5340 a constant. Return 0 if no simplification is possible. */
5343 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5344 enum machine_mode op0_mode, rtx op0, rtx op1,
5345 rtx op2)
5347 unsigned int width = GET_MODE_PRECISION (mode);
5348 bool any_change = false;
5349 rtx tem, trueop2;
5351 /* VOIDmode means "infinite" precision. */
5352 if (width == 0)
5353 width = HOST_BITS_PER_WIDE_INT;
5355 switch (code)
5357 case FMA:
5358 /* Simplify negations around the multiplication. */
5359 /* -a * -b + c => a * b + c. */
5360 if (GET_CODE (op0) == NEG)
5362 tem = simplify_unary_operation (NEG, mode, op1, mode);
5363 if (tem)
5364 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5366 else if (GET_CODE (op1) == NEG)
5368 tem = simplify_unary_operation (NEG, mode, op0, mode);
5369 if (tem)
5370 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5373 /* Canonicalize the two multiplication operands. */
5374 /* a * -b + c => -b * a + c. */
5375 if (swap_commutative_operands_p (op0, op1))
5376 tem = op0, op0 = op1, op1 = tem, any_change = true;
5378 if (any_change)
5379 return gen_rtx_FMA (mode, op0, op1, op2);
5380 return NULL_RTX;
5382 case SIGN_EXTRACT:
5383 case ZERO_EXTRACT:
5384 if (CONST_INT_P (op0)
5385 && CONST_INT_P (op1)
5386 && CONST_INT_P (op2)
5387 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5388 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5390 /* Extracting a bit-field from a constant. */
5391 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5392 HOST_WIDE_INT op1val = INTVAL (op1);
5393 HOST_WIDE_INT op2val = INTVAL (op2);
5394 if (BITS_BIG_ENDIAN)
5395 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5396 else
5397 val >>= op2val;
5399 if (HOST_BITS_PER_WIDE_INT != op1val)
5401 /* First zero-extend. */
5402 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5403 /* If desired, propagate sign bit. */
5404 if (code == SIGN_EXTRACT
5405 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5406 != 0)
5407 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5410 return gen_int_mode (val, mode);
5412 break;
5414 case IF_THEN_ELSE:
5415 if (CONST_INT_P (op0))
5416 return op0 != const0_rtx ? op1 : op2;
5418 /* Convert c ? a : a into "a". */
5419 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5420 return op1;
5422 /* Convert a != b ? a : b into "a". */
5423 if (GET_CODE (op0) == NE
5424 && ! side_effects_p (op0)
5425 && ! HONOR_NANS (mode)
5426 && ! HONOR_SIGNED_ZEROS (mode)
5427 && ((rtx_equal_p (XEXP (op0, 0), op1)
5428 && rtx_equal_p (XEXP (op0, 1), op2))
5429 || (rtx_equal_p (XEXP (op0, 0), op2)
5430 && rtx_equal_p (XEXP (op0, 1), op1))))
5431 return op1;
5433 /* Convert a == b ? a : b into "b". */
5434 if (GET_CODE (op0) == EQ
5435 && ! side_effects_p (op0)
5436 && ! HONOR_NANS (mode)
5437 && ! HONOR_SIGNED_ZEROS (mode)
5438 && ((rtx_equal_p (XEXP (op0, 0), op1)
5439 && rtx_equal_p (XEXP (op0, 1), op2))
5440 || (rtx_equal_p (XEXP (op0, 0), op2)
5441 && rtx_equal_p (XEXP (op0, 1), op1))))
5442 return op2;
5444 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5446 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5447 ? GET_MODE (XEXP (op0, 1))
5448 : GET_MODE (XEXP (op0, 0)));
5449 rtx temp;
5451 /* Look for happy constants in op1 and op2. */
5452 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5454 HOST_WIDE_INT t = INTVAL (op1);
5455 HOST_WIDE_INT f = INTVAL (op2);
5457 if (t == STORE_FLAG_VALUE && f == 0)
5458 code = GET_CODE (op0);
5459 else if (t == 0 && f == STORE_FLAG_VALUE)
5461 enum rtx_code tmp;
5462 tmp = reversed_comparison_code (op0, NULL_RTX);
5463 if (tmp == UNKNOWN)
5464 break;
5465 code = tmp;
5467 else
5468 break;
5470 return simplify_gen_relational (code, mode, cmp_mode,
5471 XEXP (op0, 0), XEXP (op0, 1));
5474 if (cmp_mode == VOIDmode)
5475 cmp_mode = op0_mode;
5476 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5477 cmp_mode, XEXP (op0, 0),
5478 XEXP (op0, 1));
5480 /* See if any simplifications were possible. */
5481 if (temp)
5483 if (CONST_INT_P (temp))
5484 return temp == const0_rtx ? op2 : op1;
5485 else if (temp)
5486 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5489 break;
5491 case VEC_MERGE:
5492 gcc_assert (GET_MODE (op0) == mode);
5493 gcc_assert (GET_MODE (op1) == mode);
5494 gcc_assert (VECTOR_MODE_P (mode));
5495 trueop2 = avoid_constant_pool_reference (op2);
5496 if (CONST_INT_P (trueop2))
5498 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5499 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5500 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5501 unsigned HOST_WIDE_INT mask;
5502 if (n_elts == HOST_BITS_PER_WIDE_INT)
5503 mask = -1;
5504 else
5505 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
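/* If none of the low n_elts selector bits is set, every element comes from
   op1; if all of them are set, every element comes from op0.  */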
5507 if (!(sel & mask) && !side_effects_p (op0))
5508 return op1;
5509 if ((sel & mask) == mask && !side_effects_p (op1))
5510 return op0;
5512 rtx trueop0 = avoid_constant_pool_reference (op0);
5513 rtx trueop1 = avoid_constant_pool_reference (op1);
5514 if (GET_CODE (trueop0) == CONST_VECTOR
5515 && GET_CODE (trueop1) == CONST_VECTOR)
5517 rtvec v = rtvec_alloc (n_elts);
5518 unsigned int i;
5520 for (i = 0; i < n_elts; i++)
5521 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5522 ? CONST_VECTOR_ELT (trueop0, i)
5523 : CONST_VECTOR_ELT (trueop1, i));
5524 return gen_rtx_CONST_VECTOR (mode, v);
5527 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5528 if no element from a appears in the result. */
5529 if (GET_CODE (op0) == VEC_MERGE)
5531 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5532 if (CONST_INT_P (tem))
5534 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5535 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5536 return simplify_gen_ternary (code, mode, mode,
5537 XEXP (op0, 1), op1, op2);
5538 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5539 return simplify_gen_ternary (code, mode, mode,
5540 XEXP (op0, 0), op1, op2);
5543 if (GET_CODE (op1) == VEC_MERGE)
5545 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5546 if (CONST_INT_P (tem))
5548 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5549 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5550 return simplify_gen_ternary (code, mode, mode,
5551 op0, XEXP (op1, 1), op2);
5552 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5553 return simplify_gen_ternary (code, mode, mode,
5554 op0, XEXP (op1, 0), op2);
5559 if (rtx_equal_p (op0, op1)
5560 && !side_effects_p (op2) && !side_effects_p (op1))
5561 return op0;
5563 break;
5565 default:
5566 gcc_unreachable ();
5569 return 0;
5572 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5573 or CONST_VECTOR,
5574 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5576 Works by unpacking OP into a collection of 8-bit values
5577 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5578 and then repacking them again for OUTERMODE. */
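/* For example, taking an SImode subreg at byte 0 of a DImode CONST_INT on a
   little-endian target unpacks the 64-bit value into eight little-endian
   bytes, selects bytes 0-3, and repacks them into a CONST_INT holding the
   low 32 bits.  */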
5580 static rtx
5581 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5582 enum machine_mode innermode, unsigned int byte)
5584 /* We support up to 512-bit values (for V8DFmode). */
5585 enum {
5586 max_bitsize = 512,
5587 value_bit = 8,
5588 value_mask = (1 << value_bit) - 1
5590 unsigned char value[max_bitsize / value_bit];
5591 int value_start;
5592 int i;
5593 int elem;
5595 int num_elem;
5596 rtx * elems;
5597 int elem_bitsize;
5598 rtx result_s;
5599 rtvec result_v = NULL;
5600 enum mode_class outer_class;
5601 enum machine_mode outer_submode;
5603 /* Some ports misuse CCmode. */
5604 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5605 return op;
5607 /* We have no way to represent a complex constant at the rtl level. */
5608 if (COMPLEX_MODE_P (outermode))
5609 return NULL_RTX;
5611 /* Unpack the value. */
5613 if (GET_CODE (op) == CONST_VECTOR)
5615 num_elem = CONST_VECTOR_NUNITS (op);
5616 elems = &CONST_VECTOR_ELT (op, 0);
5617 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5619 else
5621 num_elem = 1;
5622 elems = &op;
5623 elem_bitsize = max_bitsize;
5625 /* If this asserts, it is too complicated; reducing value_bit may help. */
5626 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5627 /* I don't know how to handle endianness of sub-units. */
5628 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5630 for (elem = 0; elem < num_elem; elem++)
5632 unsigned char * vp;
5633 rtx el = elems[elem];
5635 /* Vectors are kept in target memory order. (This is probably
5636 a mistake.) */
5638 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5639 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5640 / BITS_PER_UNIT);
5641 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5642 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5643 unsigned bytele = (subword_byte % UNITS_PER_WORD
5644 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5645 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5648 switch (GET_CODE (el))
5650 case CONST_INT:
5651 for (i = 0;
5652 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5653 i += value_bit)
5654 *vp++ = INTVAL (el) >> i;
5655 /* CONST_INTs are always logically sign-extended. */
5656 for (; i < elem_bitsize; i += value_bit)
5657 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5658 break;
5660 case CONST_DOUBLE:
5661 if (GET_MODE (el) == VOIDmode)
5663 unsigned char extend = 0;
5664 /* If this triggers, someone should have generated a
5665 CONST_INT instead. */
5666 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5668 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5669 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5670 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5672 *vp++
5673 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5674 i += value_bit;
5677 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5678 extend = -1;
5679 for (; i < elem_bitsize; i += value_bit)
5680 *vp++ = extend;
5682 else
5684 long tmp[max_bitsize / 32];
5685 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5687 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5688 gcc_assert (bitsize <= elem_bitsize);
5689 gcc_assert (bitsize % value_bit == 0);
5691 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5692 GET_MODE (el));
5694 /* real_to_target produces its result in words affected by
5695 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5696 and use WORDS_BIG_ENDIAN instead; see the documentation
5697 of SUBREG in rtl.texi. */
5698 for (i = 0; i < bitsize; i += value_bit)
5700 int ibase;
5701 if (WORDS_BIG_ENDIAN)
5702 ibase = bitsize - 1 - i;
5703 else
5704 ibase = i;
5705 *vp++ = tmp[ibase / 32] >> i % 32;
5708 /* It shouldn't matter what's done here, so fill it with
5709 zero. */
5710 for (; i < elem_bitsize; i += value_bit)
5711 *vp++ = 0;
5713 break;
5715 case CONST_FIXED:
5716 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5718 for (i = 0; i < elem_bitsize; i += value_bit)
5719 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5721 else
5723 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5724 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5725 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5726 i += value_bit)
5727 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5728 >> (i - HOST_BITS_PER_WIDE_INT);
5729 for (; i < elem_bitsize; i += value_bit)
5730 *vp++ = 0;
5732 break;
5734 default:
5735 gcc_unreachable ();
5739 /* Now, pick the right byte to start with. */
5740 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5741 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5742 will already have offset 0. */
5743 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5745 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5746 - byte);
5747 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5748 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5749 byte = (subword_byte % UNITS_PER_WORD
5750 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5753 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5754 so if it's become negative it will instead be very large.) */
5755 gcc_assert (byte < GET_MODE_SIZE (innermode));
5757 /* Convert from bytes to chunks of size value_bit. */
5758 value_start = byte * (BITS_PER_UNIT / value_bit);
5760 /* Re-pack the value. */
5762 if (VECTOR_MODE_P (outermode))
5764 num_elem = GET_MODE_NUNITS (outermode);
5765 result_v = rtvec_alloc (num_elem);
5766 elems = &RTVEC_ELT (result_v, 0);
5767 outer_submode = GET_MODE_INNER (outermode);
5769 else
5771 num_elem = 1;
5772 elems = &result_s;
5773 outer_submode = outermode;
5776 outer_class = GET_MODE_CLASS (outer_submode);
5777 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5779 gcc_assert (elem_bitsize % value_bit == 0);
5780 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5782 for (elem = 0; elem < num_elem; elem++)
5784 unsigned char *vp;
5786 /* Vectors are stored in target memory order. (This is probably
5787 a mistake.) */
5789 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5790 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5791 / BITS_PER_UNIT);
5792 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5793 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5794 unsigned bytele = (subword_byte % UNITS_PER_WORD
5795 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5796 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5799 switch (outer_class)
5801 case MODE_INT:
5802 case MODE_PARTIAL_INT:
5804 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5806 for (i = 0;
5807 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5808 i += value_bit)
5809 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5810 for (; i < elem_bitsize; i += value_bit)
5811 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5812 << (i - HOST_BITS_PER_WIDE_INT);
5814 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5815 know why. */
5816 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5817 elems[elem] = gen_int_mode (lo, outer_submode);
5818 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5819 elems[elem] = immed_double_const (lo, hi, outer_submode);
5820 else
5821 return NULL_RTX;
5823 break;
5825 case MODE_FLOAT:
5826 case MODE_DECIMAL_FLOAT:
5828 REAL_VALUE_TYPE r;
5829 long tmp[max_bitsize / 32];
5831 /* real_from_target wants its input in words affected by
5832 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5833 and use WORDS_BIG_ENDIAN instead; see the documentation
5834 of SUBREG in rtl.texi. */
5835 for (i = 0; i < max_bitsize / 32; i++)
5836 tmp[i] = 0;
5837 for (i = 0; i < elem_bitsize; i += value_bit)
5839 int ibase;
5840 if (WORDS_BIG_ENDIAN)
5841 ibase = elem_bitsize - 1 - i;
5842 else
5843 ibase = i;
5844 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5847 real_from_target (&r, tmp, outer_submode);
5848 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5850 break;
5852 case MODE_FRACT:
5853 case MODE_UFRACT:
5854 case MODE_ACCUM:
5855 case MODE_UACCUM:
5857 FIXED_VALUE_TYPE f;
5858 f.data.low = 0;
5859 f.data.high = 0;
5860 f.mode = outer_submode;
5862 for (i = 0;
5863 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5864 i += value_bit)
5865 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5866 for (; i < elem_bitsize; i += value_bit)
5867 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5868 << (i - HOST_BITS_PER_WIDE_INT));
5870 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5872 break;
5874 default:
5875 gcc_unreachable ();
5878 if (VECTOR_MODE_P (outermode))
5879 return gen_rtx_CONST_VECTOR (outermode, result_v);
5880 else
5881 return result_s;
5884 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5885 Return 0 if no simplifications are possible. */
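/* A minimal usage sketch (illustrative only, not from the original source;
   assumes a little-endian target where HImode is 2 bytes and QImode 1 byte):

     rtx lo = simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0);

   LO is (const_int 0x34), the least significant byte of the HImode constant.
   Passing a VOIDmode CONST_INT and naming its mode via INNERMODE is the
   usual calling convention for constant operands.  */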
5886 rtx
5887 simplify_subreg (enum machine_mode outermode, rtx op,
5888 enum machine_mode innermode, unsigned int byte)
5890 /* Little bit of sanity checking. */
5891 gcc_assert (innermode != VOIDmode);
5892 gcc_assert (outermode != VOIDmode);
5893 gcc_assert (innermode != BLKmode);
5894 gcc_assert (outermode != BLKmode);
5896 gcc_assert (GET_MODE (op) == innermode
5897 || GET_MODE (op) == VOIDmode);
5899 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5900 return NULL_RTX;
5902 if (byte >= GET_MODE_SIZE (innermode))
5903 return NULL_RTX;
5905 if (outermode == innermode && !byte)
5906 return op;
5908 if (CONST_SCALAR_INT_P (op)
5909 || CONST_DOUBLE_AS_FLOAT_P (op)
5910 || GET_CODE (op) == CONST_FIXED
5911 || GET_CODE (op) == CONST_VECTOR)
5912 return simplify_immed_subreg (outermode, op, innermode, byte);
5914 /* Changing mode twice with SUBREG => just change it once,
5915 or not at all if changing back to the starting mode. */
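/* Illustrative examples (not in the original source), assuming the resulting
   subreg is valid for the target:
     (subreg:QI (subreg:HI (reg:SI r) 0) 0) folds to (subreg:QI (reg:SI r) 0),
   and, changing back to the starting mode,
     (subreg:SI (subreg:HI (reg:SI r) 0) 0) folds to (reg:SI r).  */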
5916 if (GET_CODE (op) == SUBREG)
5918 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5919 int final_offset = byte + SUBREG_BYTE (op);
5920 rtx newx;
5922 if (outermode == innermostmode
5923 && byte == 0 && SUBREG_BYTE (op) == 0)
5924 return SUBREG_REG (op);
5926 /* The SUBREG_BYTE represents the offset, as if the value were stored
5927 in memory. The irritating exception is a paradoxical subreg, where
5928 we define SUBREG_BYTE to be 0; on big-endian machines the offset
5929 would otherwise be negative. For a moment, undo this exception. */
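/* For instance (illustrative only): on a big-endian 32-bit target a
   paradoxical DImode subreg of an SImode value logically starts at byte
   offset -4 (difference = 4 - 8 = -4), so the two blocks below temporarily
   re-introduce such negative offsets before composing the inner and outer
   offsets.  */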
5930 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5932 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5933 if (WORDS_BIG_ENDIAN)
5934 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5935 if (BYTES_BIG_ENDIAN)
5936 final_offset += difference % UNITS_PER_WORD;
5938 if (SUBREG_BYTE (op) == 0
5939 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5941 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5942 if (WORDS_BIG_ENDIAN)
5943 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5944 if (BYTES_BIG_ENDIAN)
5945 final_offset += difference % UNITS_PER_WORD;
5948 /* See whether resulting subreg will be paradoxical. */
5949 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5951 /* In nonparadoxical subregs we can't handle negative offsets. */
5952 if (final_offset < 0)
5953 return NULL_RTX;
5954 /* Bail out in case resulting subreg would be incorrect. */
5955 if (final_offset % GET_MODE_SIZE (outermode)
5956 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5957 return NULL_RTX;
5959 else
5961 int offset = 0;
5962 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5964 /* In a paradoxical subreg, see if we are still looking at the lower part.
5965 If so, our SUBREG_BYTE will be 0. */
5966 if (WORDS_BIG_ENDIAN)
5967 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5968 if (BYTES_BIG_ENDIAN)
5969 offset += difference % UNITS_PER_WORD;
5970 if (offset == final_offset)
5971 final_offset = 0;
5972 else
5973 return NULL_RTX;
5976 /* Recurse for further possible simplifications. */
5977 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5978 final_offset);
5979 if (newx)
5980 return newx;
5981 if (validate_subreg (outermode, innermostmode,
5982 SUBREG_REG (op), final_offset))
5984 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5985 if (SUBREG_PROMOTED_VAR_P (op)
5986 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5987 && GET_MODE_CLASS (outermode) == MODE_INT
5988 && IN_RANGE (GET_MODE_SIZE (outermode),
5989 GET_MODE_SIZE (innermode),
5990 GET_MODE_SIZE (innermostmode))
5991 && subreg_lowpart_p (newx))
5993 SUBREG_PROMOTED_VAR_P (newx) = 1;
5994 SUBREG_PROMOTED_UNSIGNED_SET
5995 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5997 return newx;
5999 return NULL_RTX;
6002 /* SUBREG of a hard register => just change the register number
6003 and/or mode. If the hard register is not valid in that mode,
6004 suppress this simplification. If the hard register is the stack,
6005 frame, or argument pointer, leave this as a SUBREG. */
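/* For instance (hypothetical target where hard register 3 can hold SImode
   and a DImode value occupies registers 3 and 4), (subreg:SI (reg:DI 3) 0)
   can become (reg:SI 3) and (subreg:SI (reg:DI 3) 4) can become (reg:SI 4),
   subject to the simplify_subreg_regno checks below.  */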
6007 if (REG_P (op) && HARD_REGISTER_P (op))
6009 unsigned int regno, final_regno;
6011 regno = REGNO (op);
6012 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6013 if (HARD_REGISTER_NUM_P (final_regno))
6015 rtx x;
6016 int final_offset = byte;
6018 /* Adjust offset for paradoxical subregs. */
6019 if (byte == 0
6020 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6022 int difference = (GET_MODE_SIZE (innermode)
6023 - GET_MODE_SIZE (outermode));
6024 if (WORDS_BIG_ENDIAN)
6025 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6026 if (BYTES_BIG_ENDIAN)
6027 final_offset += difference % UNITS_PER_WORD;
6030 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6032 /* Propagate original regno. We don't have any way to specify
6033 the offset inside original regno, so do so only for lowpart.
6034 The information is used only by alias analysis, which cannot
6035 grok partial registers anyway. */
6037 if (subreg_lowpart_offset (outermode, innermode) == byte)
6038 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6039 return x;
6043 /* If we have a SUBREG of a register that we are replacing and we are
6044 replacing it with a MEM, make a new MEM and try replacing the
6045 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6046 or if we would be widening it. */
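/* E.g. (subreg:QI (mem:SI A) 0) can be rewritten as a QImode MEM via
   adjust_address_nv below, provided the address A is not mode-dependent and
   we are not widening the reference.  (Illustrative; not from the original
   source.)  */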
6048 if (MEM_P (op)
6049 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6050 /* Allow splitting of volatile memory references in case we don't
6051 have an instruction to move the whole thing. */
6052 && (! MEM_VOLATILE_P (op)
6053 || ! have_insn_for (SET, innermode))
6054 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6055 return adjust_address_nv (op, outermode, byte);
6057 /* Handle complex values represented as CONCAT
6058 of real and imaginary part. */
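/* Illustrative example (not in the original source), assuming SFmode is
   4 bytes wide: (subreg:SF (concat:SC (reg:SF a) (reg:SF b)) 4) selects the
   imaginary half and simplifies to (reg:SF b).  */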
6059 if (GET_CODE (op) == CONCAT)
6061 unsigned int part_size, final_offset;
6062 rtx part, res;
6064 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6065 if (byte < part_size)
6067 part = XEXP (op, 0);
6068 final_offset = byte;
6070 else
6072 part = XEXP (op, 1);
6073 final_offset = byte - part_size;
6076 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6077 return NULL_RTX;
6079 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6080 if (res)
6081 return res;
6082 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6083 return gen_rtx_SUBREG (outermode, part, final_offset);
6084 return NULL_RTX;
6087 /* A SUBREG resulting from a zero extension may fold to zero if
6088 it extracts higher bits than the ZERO_EXTEND's source supplies. */
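/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI r)) 4)
   reads only bits that the ZERO_EXTEND guarantees to be zero, so it folds to
   (const_int 0).  (Illustrative; not from the original source.)  */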
6089 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6091 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6092 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6093 return CONST0_RTX (outermode);
6096 if (SCALAR_INT_MODE_P (outermode)
6097 && SCALAR_INT_MODE_P (innermode)
6098 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6099 && byte == subreg_lowpart_offset (outermode, innermode))
6101 rtx tem = simplify_truncation (outermode, op, innermode);
6102 if (tem)
6103 return tem;
6106 return NULL_RTX;
6109 /* Make a SUBREG operation or equivalent if it folds. */
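/* Typical usage sketch (illustrative only): ask for the low SImode part of a
   DImode value, getting either a simplified rtx or a newly generated SUBREG:

     rtx lo = simplify_gen_subreg (SImode, op, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   A NULL return means the SUBREG could not be formed at all.  */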
6111 rtx
6112 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6113 enum machine_mode innermode, unsigned int byte)
6115 rtx newx;
6117 newx = simplify_subreg (outermode, op, innermode, byte);
6118 if (newx)
6119 return newx;
6121 if (GET_CODE (op) == SUBREG
6122 || GET_CODE (op) == CONCAT
6123 || GET_MODE (op) == VOIDmode)
6124 return NULL_RTX;
6126 if (validate_subreg (outermode, innermode, op, byte))
6127 return gen_rtx_SUBREG (outermode, op, byte);
6129 return NULL_RTX;
6132 /* Simplify X, an rtx expression.
6134 Return the simplified expression or NULL if no simplifications
6135 were possible.
6137 This is the preferred entry point into the simplification routines;
6138 however, we still allow passes to call the more specific routines.
6140 Right now GCC has three (yes, three) major bodies of RTL simplification
6141 code that need to be unified.
6143 1. fold_rtx in cse.c. This code uses various CSE specific
6144 information to aid in RTL simplification.
6146 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6147 it uses combine specific information to aid in RTL
6148 simplification.
6150 3. The routines in this file.
6153 Long term we want to only have one body of simplification code; to
6154 get to that state I recommend the following steps:
6156 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6157 that do not depend on pass-specific state into these routines.
6159 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6160 use this routine whenever possible.
6162 3. Allow for pass dependent state to be provided to these
6163 routines and add simplifications based on the pass dependent
6164 state. Remove code from cse.c & combine.c that becomes
6165 redundant/dead.
6167 It will take time, but ultimately the compiler will be easier to
6168 maintain and improve. It's totally silly that when we add a
6169 simplification it needs to be added to 4 places (3 for RTL
6170 simplification and 1 for tree simplification). */
6172 rtx
6173 simplify_rtx (const_rtx x)
6175 const enum rtx_code code = GET_CODE (x);
6176 const enum machine_mode mode = GET_MODE (x);
6178 switch (GET_RTX_CLASS (code))
6180 case RTX_UNARY:
6181 return simplify_unary_operation (code, mode,
6182 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6183 case RTX_COMM_ARITH:
6184 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6185 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6187 /* Fall through.... */
6189 case RTX_BIN_ARITH:
6190 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6192 case RTX_TERNARY:
6193 case RTX_BITFIELD_OPS:
6194 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6195 XEXP (x, 0), XEXP (x, 1),
6196 XEXP (x, 2));
6198 case RTX_COMPARE:
6199 case RTX_COMM_COMPARE:
6200 return simplify_relational_operation (code, mode,
6201 ((GET_MODE (XEXP (x, 0))
6202 != VOIDmode)
6203 ? GET_MODE (XEXP (x, 0))
6204 : GET_MODE (XEXP (x, 1))),
6205 XEXP (x, 0),
6206 XEXP (x, 1));
6208 case RTX_EXTRA:
6209 if (code == SUBREG)
6210 return simplify_subreg (mode, SUBREG_REG (x),
6211 GET_MODE (SUBREG_REG (x)),
6212 SUBREG_BYTE (x));
6213 break;
6215 case RTX_OBJ:
6216 if (code == LO_SUM)
6218 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6219 if (GET_CODE (XEXP (x, 0)) == HIGH
6220 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6221 return XEXP (x, 1);
6223 break;
6225 default:
6226 break;
6228 return NULL;