/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "predict.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
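
/* For instance, when a negative value is split across such a pair and
   only the low half is known, HWI_SIGN_EXTEND supplies the matching
   high half: HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is (HOST_WIDE_INT) -1
   (all ones), while HWI_SIGN_EXTEND ((HOST_WIDE_INT) 5) is 0.  */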

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
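
/* For example, in QImode negating (const_int -128) overflows the 8-bit
   range; the unsigned negation followed by gen_int_mode wraps the
   result back to (const_int -128) instead of producing an out-of-range
   128.  */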

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
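
/* E.g. for 32-bit SImode only the constant 0x80000000 (the sign bit
   alone) satisfies mode_signbit_p; values such as 0x40000000 or
   0xc0000000 do not.  */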

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
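
/* To illustrate the difference between the three predicates in SImode:
   val_signbit_p is true only for 0x80000000 exactly, whereas
   val_signbit_known_set_p is true for any value with bit 31 set
   (e.g. 0xffffffff) and val_signbit_known_clear_p for any value with
   bit 31 clear (e.g. 0x7fffffff).  */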

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
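
/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) folds
   to just X, while (plus (const_int 4) (reg:SI 1)) cannot be folded
   and is instead canonicalized to (plus:SI (reg:SI 1) (const_int 4))
   with the constant second.  */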

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
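
/* For instance, given a MEM whose address is a SYMBOL_REF into the
   constant pool holding a DFmode constant, and which is accessed in
   DFmode at offset 0, this returns the CONST_DOUBLE itself so that
   further constant folding can take place.  */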

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }

  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
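
/* For example, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (const_int 4)) does not merely substitute but
   also folds the result, yielding (const_int 4).  */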
590 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
591 Only handle cases where the truncated value is inherently an rvalue.
593 RTL provides two ways of truncating a value:
595 1. a lowpart subreg. This form is only a truncation when both
596 the outer and inner modes (here MODE and OP_MODE respectively)
597 are scalar integers, and only then when the subreg is used as
598 an rvalue.
600 It is only valid to form such truncating subregs if the
601 truncation requires no action by the target. The onus for
602 proving this is on the creator of the subreg -- e.g. the
603 caller to simplify_subreg or simplify_gen_subreg -- and typically
604 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
606 2. a TRUNCATE. This form handles both scalar and compound integers.
608 The first form is preferred where valid. However, the TRUNCATE
609 handling in simplify_unary_operation turns the second form into the
610 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
611 so it is generally safe to form rvalue truncations using:
613 simplify_gen_unary (TRUNCATE, ...)
615 and leave simplify_unary_operation to work out which representation
616 should be used.
618 Because of the proof requirements on (1), simplify_truncation must
619 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
620 regardless of whether the outer truncation came from a SUBREG or a
621 TRUNCATE. For example, if the caller has proven that an SImode
622 truncation of:
624 (and:DI X Y)
626 is a no-op and can be represented as a subreg, it does not follow
627 that SImode truncations of X and Y are also no-ops. On a target
628 like 64-bit MIPS that requires SImode values to be stored in
629 sign-extended form, an SImode truncation of:
631 (and:DI (reg:DI X) (const_int 63))
633 is trivially a no-op because only the lower 6 bits can be set.
634 However, X is still an arbitrary 64-bit number and so we cannot
635 assume that truncating it too is a no-op. */
637 static rtx
638 simplify_truncation (machine_mode mode, rtx op,
639 machine_mode op_mode)
641 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
642 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
643 gcc_assert (precision <= op_precision);
645 /* Optimize truncations of zero and sign extended values. */
646 if (GET_CODE (op) == ZERO_EXTEND
647 || GET_CODE (op) == SIGN_EXTEND)
649 /* There are three possibilities. If MODE is the same as the
650 origmode, we can omit both the extension and the subreg.
651 If MODE is not larger than the origmode, we can apply the
652 truncation without the extension. Finally, if the outermode
653 is larger than the origmode, we can just extend to the appropriate
654 mode. */
655 machine_mode origmode = GET_MODE (XEXP (op, 0));
656 if (mode == origmode)
657 return XEXP (op, 0);
658 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
659 return simplify_gen_unary (TRUNCATE, mode,
660 XEXP (op, 0), origmode);
661 else
662 return simplify_gen_unary (GET_CODE (op), mode,
663 XEXP (op, 0), origmode);
666 /* If the machine can perform operations in the truncated mode, distribute
667 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
668 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
669 if (1
670 #ifdef WORD_REGISTER_OPERATIONS
671 && precision >= BITS_PER_WORD
672 #endif
673 && (GET_CODE (op) == PLUS
674 || GET_CODE (op) == MINUS
675 || GET_CODE (op) == MULT))
677 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
678 if (op0)
680 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
681 if (op1)
682 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
686 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
687 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if ((GET_CODE (op) == LSHIFTRT
690 || GET_CODE (op) == ASHIFTRT)
691 /* Ensure that OP_MODE is at least twice as wide as MODE
692 to avoid the possibility that an outer LSHIFTRT shifts by more
693 than the sign extension's sign_bit_copies and introduces zeros
694 into the high bits of the result. */
695 && 2 * precision <= op_precision
696 && CONST_INT_P (XEXP (op, 1))
697 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
698 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
699 && UINTVAL (XEXP (op, 1)) < precision)
700 return simplify_gen_binary (ASHIFTRT, mode,
701 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
703 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
704 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
705 the outer subreg is effectively a truncation to the original mode. */
706 if ((GET_CODE (op) == LSHIFTRT
707 || GET_CODE (op) == ASHIFTRT)
708 && CONST_INT_P (XEXP (op, 1))
709 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
711 && UINTVAL (XEXP (op, 1)) < precision)
712 return simplify_gen_binary (LSHIFTRT, mode,
713 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
715 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
716 to (ashift:QI (x:QI) C), where C is a suitable small constant and
717 the outer subreg is effectively a truncation to the original mode. */
718 if (GET_CODE (op) == ASHIFT
719 && CONST_INT_P (XEXP (op, 1))
720 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
721 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
722 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
723 && UINTVAL (XEXP (op, 1)) < precision)
724 return simplify_gen_binary (ASHIFT, mode,
725 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
727 /* Recognize a word extraction from a multi-word subreg. */
728 if ((GET_CODE (op) == LSHIFTRT
729 || GET_CODE (op) == ASHIFTRT)
730 && SCALAR_INT_MODE_P (mode)
731 && SCALAR_INT_MODE_P (op_mode)
732 && precision >= BITS_PER_WORD
733 && 2 * precision <= op_precision
734 && CONST_INT_P (XEXP (op, 1))
735 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
736 && UINTVAL (XEXP (op, 1)) < op_precision)
738 int byte = subreg_lowpart_offset (mode, op_mode);
739 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
740 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
741 (WORDS_BIG_ENDIAN
742 ? byte - shifted_bytes
743 : byte + shifted_bytes));
746 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
747 and try replacing the TRUNCATE and shift with it. Don't do this
748 if the MEM has a mode-dependent address. */
749 if ((GET_CODE (op) == LSHIFTRT
750 || GET_CODE (op) == ASHIFTRT)
751 && SCALAR_INT_MODE_P (op_mode)
752 && MEM_P (XEXP (op, 0))
753 && CONST_INT_P (XEXP (op, 1))
754 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
755 && INTVAL (XEXP (op, 1)) > 0
756 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
757 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
758 MEM_ADDR_SPACE (XEXP (op, 0)))
759 && ! MEM_VOLATILE_P (XEXP (op, 0))
760 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
761 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
763 int byte = subreg_lowpart_offset (mode, op_mode);
764 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
765 return adjust_address_nv (XEXP (op, 0), mode,
766 (WORDS_BIG_ENDIAN
767 ? byte - shifted_bytes
768 : byte + shifted_bytes));
771 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
772 (OP:SI foo:SI) if OP is NEG or ABS. */
773 if ((GET_CODE (op) == ABS
774 || GET_CODE (op) == NEG)
775 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
776 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
777 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
778 return simplify_gen_unary (GET_CODE (op), mode,
779 XEXP (XEXP (op, 0), 0), mode);
781 /* (truncate:A (subreg:B (truncate:C X) 0)) is
782 (truncate:A X). */
783 if (GET_CODE (op) == SUBREG
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
787 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
788 && subreg_lowpart_p (op))
790 rtx inner = XEXP (SUBREG_REG (op), 0);
791 if (GET_MODE_PRECISION (mode)
792 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
793 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
794 else
795 /* If subreg above is paradoxical and C is narrower
796 than A, return (subreg:A (truncate:C X) 0). */
797 return simplify_gen_subreg (mode, SUBREG_REG (op),
798 GET_MODE (SUBREG_REG (op)), 0);
801 /* (truncate:A (truncate:B X)) is (truncate:A X). */
802 if (GET_CODE (op) == TRUNCATE)
803 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
804 GET_MODE (XEXP (op, 0)));
806 return NULL_RTX;
809 /* Try to simplify a unary operation CODE whose output mode is to be
810 MODE with input operand OP whose mode was originally OP_MODE.
811 Return zero if no simplification can be made. */
813 simplify_unary_operation (enum rtx_code code, machine_mode mode,
814 rtx op, machine_mode op_mode)
816 rtx trueop, tem;
818 trueop = avoid_constant_pool_reference (op);
820 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
821 if (tem)
822 return tem;
824 return simplify_unary_operation_1 (code, mode, op);
827 /* Perform some simplifications we can do even if the operands
828 aren't constant. */
829 static rtx
830 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
832 enum rtx_code reversed;
833 rtx temp;
835 switch (code)
837 case NOT:
838 /* (not (not X)) == X. */
839 if (GET_CODE (op) == NOT)
840 return XEXP (op, 0);
842 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
843 comparison is all ones. */
844 if (COMPARISON_P (op)
845 && (mode == BImode || STORE_FLAG_VALUE == -1)
846 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
847 return simplify_gen_relational (reversed, mode, VOIDmode,
848 XEXP (op, 0), XEXP (op, 1));
850 /* (not (plus X -1)) can become (neg X). */
851 if (GET_CODE (op) == PLUS
852 && XEXP (op, 1) == constm1_rtx)
853 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
855 /* Similarly, (not (neg X)) is (plus X -1). */
856 if (GET_CODE (op) == NEG)
857 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
858 CONSTM1_RTX (mode));
860 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
861 if (GET_CODE (op) == XOR
862 && CONST_INT_P (XEXP (op, 1))
863 && (temp = simplify_unary_operation (NOT, mode,
864 XEXP (op, 1), mode)) != 0)
865 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
867 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
868 if (GET_CODE (op) == PLUS
869 && CONST_INT_P (XEXP (op, 1))
870 && mode_signbit_p (mode, XEXP (op, 1))
871 && (temp = simplify_unary_operation (NOT, mode,
872 XEXP (op, 1), mode)) != 0)
873 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
876 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
877 operands other than 1, but that is not valid. We could do a
878 similar simplification for (not (lshiftrt C X)) where C is
879 just the sign bit, but this doesn't seem common enough to
880 bother with. */
881 if (GET_CODE (op) == ASHIFT
882 && XEXP (op, 0) == const1_rtx)
884 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
885 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
888 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
889 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
890 so we can perform the above simplification. */
891 if (STORE_FLAG_VALUE == -1
892 && GET_CODE (op) == ASHIFTRT
893 && CONST_INT_P (XEXP (op, 1))
894 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
895 return simplify_gen_relational (GE, mode, VOIDmode,
896 XEXP (op, 0), const0_rtx);
899 if (GET_CODE (op) == SUBREG
900 && subreg_lowpart_p (op)
901 && (GET_MODE_SIZE (GET_MODE (op))
902 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
903 && GET_CODE (SUBREG_REG (op)) == ASHIFT
904 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
906 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
907 rtx x;
909 x = gen_rtx_ROTATE (inner_mode,
910 simplify_gen_unary (NOT, inner_mode, const1_rtx,
911 inner_mode),
912 XEXP (SUBREG_REG (op), 1));
913 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
914 if (temp)
915 return temp;
918 /* Apply De Morgan's laws to reduce number of patterns for machines
919 with negating logical insns (and-not, nand, etc.). If result has
920 only one NOT, put it first, since that is how the patterns are
921 coded. */
922 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
924 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
925 machine_mode op_mode;
927 op_mode = GET_MODE (in1);
928 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
930 op_mode = GET_MODE (in2);
931 if (op_mode == VOIDmode)
932 op_mode = mode;
933 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
935 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
937 rtx tem = in2;
938 in2 = in1; in1 = tem;
941 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
942 mode, in1, in2);
945 /* (not (bswap x)) -> (bswap (not x)). */
946 if (GET_CODE (op) == BSWAP)
948 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
949 return simplify_gen_unary (BSWAP, mode, x, mode);
951 break;
953 case NEG:
954 /* (neg (neg X)) == X. */
955 if (GET_CODE (op) == NEG)
956 return XEXP (op, 0);
958 /* (neg (plus X 1)) can become (not X). */
959 if (GET_CODE (op) == PLUS
960 && XEXP (op, 1) == const1_rtx)
961 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
963 /* Similarly, (neg (not X)) is (plus X 1). */
964 if (GET_CODE (op) == NOT)
965 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
966 CONST1_RTX (mode));
968 /* (neg (minus X Y)) can become (minus Y X). This transformation
969 isn't safe for modes with signed zeros, since if X and Y are
970 both +0, (minus Y X) is the same as (minus X Y). If the
971 rounding mode is towards +infinity (or -infinity) then the two
972 expressions will be rounded differently. */
973 if (GET_CODE (op) == MINUS
974 && !HONOR_SIGNED_ZEROS (mode)
975 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
976 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
978 if (GET_CODE (op) == PLUS
979 && !HONOR_SIGNED_ZEROS (mode)
980 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
982 /* (neg (plus A C)) is simplified to (minus -C A). */
983 if (CONST_SCALAR_INT_P (XEXP (op, 1))
984 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
986 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
987 if (temp)
988 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
993 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
996 /* (neg (mult A B)) becomes (mult A (neg B)).
997 This works even for floating-point values. */
998 if (GET_CODE (op) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1001 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1002 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1007 is a constant). */
1008 if (GET_CODE (op) == ASHIFT)
1010 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1011 if (temp)
1012 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1015 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1016 C is equal to the width of MODE minus 1. */
1017 if (GET_CODE (op) == ASHIFTRT
1018 && CONST_INT_P (XEXP (op, 1))
1019 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1020 return simplify_gen_binary (LSHIFTRT, mode,
1021 XEXP (op, 0), XEXP (op, 1));
1023 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1024 C is equal to the width of MODE minus 1. */
1025 if (GET_CODE (op) == LSHIFTRT
1026 && CONST_INT_P (XEXP (op, 1))
1027 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1028 return simplify_gen_binary (ASHIFTRT, mode,
1029 XEXP (op, 0), XEXP (op, 1));
1031 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1032 if (GET_CODE (op) == XOR
1033 && XEXP (op, 1) == const1_rtx
1034 && nonzero_bits (XEXP (op, 0), mode) == 1)
1035 return plus_constant (mode, XEXP (op, 0), -1);
1037 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1038 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1039 if (GET_CODE (op) == LT
1040 && XEXP (op, 1) == const0_rtx
1041 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1043 machine_mode inner = GET_MODE (XEXP (op, 0));
1044 int isize = GET_MODE_PRECISION (inner);
1045 if (STORE_FLAG_VALUE == 1)
1047 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1048 GEN_INT (isize - 1));
1049 if (mode == inner)
1050 return temp;
1051 if (GET_MODE_PRECISION (mode) > isize)
1052 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1053 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1055 else if (STORE_FLAG_VALUE == -1)
1057 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1058 GEN_INT (isize - 1));
1059 if (mode == inner)
1060 return temp;
1061 if (GET_MODE_PRECISION (mode) > isize)
1062 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1063 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1066 break;
1068 case TRUNCATE:
1069 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1070 with the umulXi3_highpart patterns. */
1071 if (GET_CODE (op) == LSHIFTRT
1072 && GET_CODE (XEXP (op, 0)) == MULT)
1073 break;
1075 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1077 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1079 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1080 if (temp)
1081 return temp;
1083 /* We can't handle truncation to a partial integer mode here
1084 because we don't know the real bitsize of the partial
1085 integer mode. */
1086 break;
1089 if (GET_MODE (op) != VOIDmode)
1091 temp = simplify_truncation (mode, op, GET_MODE (op));
1092 if (temp)
1093 return temp;
1096 /* If we know that the value is already truncated, we can
1097 replace the TRUNCATE with a SUBREG. */
1098 if (GET_MODE_NUNITS (mode) == 1
1099 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1100 || truncated_to_mode (mode, op)))
1102 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1103 if (temp)
1104 return temp;
1107 /* A truncate of a comparison can be replaced with a subreg if
1108 STORE_FLAG_VALUE permits. This is like the previous test,
1109 but it works even if the comparison is done in a mode larger
1110 than HOST_BITS_PER_WIDE_INT. */
1111 if (HWI_COMPUTABLE_MODE_P (mode)
1112 && COMPARISON_P (op)
1113 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1115 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1116 if (temp)
1117 return temp;
1120 /* A truncate of a memory is just loading the low part of the memory
1121 if we are not changing the meaning of the address. */
1122 if (GET_CODE (op) == MEM
1123 && !VECTOR_MODE_P (mode)
1124 && !MEM_VOLATILE_P (op)
1125 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1127 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1128 if (temp)
1129 return temp;
1132 break;
1134 case FLOAT_TRUNCATE:
1135 if (DECIMAL_FLOAT_MODE_P (mode))
1136 break;
1138 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1139 if (GET_CODE (op) == FLOAT_EXTEND
1140 && GET_MODE (XEXP (op, 0)) == mode)
1141 return XEXP (op, 0);
1143 /* (float_truncate:SF (float_truncate:DF foo:XF))
1144 = (float_truncate:SF foo:XF).
1145 This may eliminate double rounding, so it is unsafe.
1147 (float_truncate:SF (float_extend:XF foo:DF))
1148 = (float_truncate:SF foo:DF).
1150 (float_truncate:DF (float_extend:XF foo:SF))
 1151      = (float_extend:DF foo:SF). */
1152 if ((GET_CODE (op) == FLOAT_TRUNCATE
1153 && flag_unsafe_math_optimizations)
1154 || GET_CODE (op) == FLOAT_EXTEND)
1155 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1156 0)))
1157 > GET_MODE_SIZE (mode)
1158 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1159 mode,
1160 XEXP (op, 0), mode);
1162 /* (float_truncate (float x)) is (float x) */
1163 if (GET_CODE (op) == FLOAT
1164 && (flag_unsafe_math_optimizations
1165 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1166 && ((unsigned)significand_size (GET_MODE (op))
1167 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1168 - num_sign_bit_copies (XEXP (op, 0),
1169 GET_MODE (XEXP (op, 0))))))))
1170 return simplify_gen_unary (FLOAT, mode,
1171 XEXP (op, 0),
1172 GET_MODE (XEXP (op, 0)));
1174 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1175 (OP:SF foo:SF) if OP is NEG or ABS. */
1176 if ((GET_CODE (op) == ABS
1177 || GET_CODE (op) == NEG)
1178 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1179 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1180 return simplify_gen_unary (GET_CODE (op), mode,
1181 XEXP (XEXP (op, 0), 0), mode);
1183 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1184 is (float_truncate:SF x). */
1185 if (GET_CODE (op) == SUBREG
1186 && subreg_lowpart_p (op)
1187 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1188 return SUBREG_REG (op);
1189 break;
1191 case FLOAT_EXTEND:
1192 if (DECIMAL_FLOAT_MODE_P (mode))
1193 break;
1195 /* (float_extend (float_extend x)) is (float_extend x)
1197 (float_extend (float x)) is (float x) assuming that double
 1198      rounding can't happen.  */
1200 if (GET_CODE (op) == FLOAT_EXTEND
1201 || (GET_CODE (op) == FLOAT
1202 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1203 && ((unsigned)significand_size (GET_MODE (op))
1204 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1205 - num_sign_bit_copies (XEXP (op, 0),
1206 GET_MODE (XEXP (op, 0)))))))
1207 return simplify_gen_unary (GET_CODE (op), mode,
1208 XEXP (op, 0),
1209 GET_MODE (XEXP (op, 0)));
1211 break;
1213 case ABS:
1214 /* (abs (neg <foo>)) -> (abs <foo>) */
1215 if (GET_CODE (op) == NEG)
1216 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1217 GET_MODE (XEXP (op, 0)));
1219 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1220 do nothing. */
1221 if (GET_MODE (op) == VOIDmode)
1222 break;
1224 /* If operand is something known to be positive, ignore the ABS. */
1225 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1226 || val_signbit_known_clear_p (GET_MODE (op),
1227 nonzero_bits (op, GET_MODE (op))))
1228 return op;
1230 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1231 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1232 return gen_rtx_NEG (mode, op);
1234 break;
1236 case FFS:
1237 /* (ffs (*_extend <X>)) = (ffs <X>) */
1238 if (GET_CODE (op) == SIGN_EXTEND
1239 || GET_CODE (op) == ZERO_EXTEND)
1240 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1241 GET_MODE (XEXP (op, 0)));
1242 break;
1244 case POPCOUNT:
1245 switch (GET_CODE (op))
1247 case BSWAP:
1248 case ZERO_EXTEND:
1249 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1250 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1251 GET_MODE (XEXP (op, 0)));
1253 case ROTATE:
1254 case ROTATERT:
1255 /* Rotations don't affect popcount. */
1256 if (!side_effects_p (XEXP (op, 1)))
1257 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1258 GET_MODE (XEXP (op, 0)));
1259 break;
1261 default:
1262 break;
1264 break;
1266 case PARITY:
1267 switch (GET_CODE (op))
1269 case NOT:
1270 case BSWAP:
1271 case ZERO_EXTEND:
1272 case SIGN_EXTEND:
1273 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1274 GET_MODE (XEXP (op, 0)));
1276 case ROTATE:
1277 case ROTATERT:
1278 /* Rotations don't affect parity. */
1279 if (!side_effects_p (XEXP (op, 1)))
1280 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1281 GET_MODE (XEXP (op, 0)));
1282 break;
1284 default:
1285 break;
1287 break;
1289 case BSWAP:
1290 /* (bswap (bswap x)) -> x. */
1291 if (GET_CODE (op) == BSWAP)
1292 return XEXP (op, 0);
1293 break;
1295 case FLOAT:
1296 /* (float (sign_extend <X>)) = (float <X>). */
1297 if (GET_CODE (op) == SIGN_EXTEND)
1298 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1299 GET_MODE (XEXP (op, 0)));
1300 break;
1302 case SIGN_EXTEND:
1303 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1304 becomes just the MINUS if its mode is MODE. This allows
1305 folding switch statements on machines using casesi (such as
1306 the VAX). */
1307 if (GET_CODE (op) == TRUNCATE
1308 && GET_MODE (XEXP (op, 0)) == mode
1309 && GET_CODE (XEXP (op, 0)) == MINUS
1310 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1311 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1312 return XEXP (op, 0);
1314 /* Extending a widening multiplication should be canonicalized to
1315 a wider widening multiplication. */
1316 if (GET_CODE (op) == MULT)
1318 rtx lhs = XEXP (op, 0);
1319 rtx rhs = XEXP (op, 1);
1320 enum rtx_code lcode = GET_CODE (lhs);
1321 enum rtx_code rcode = GET_CODE (rhs);
1323 /* Widening multiplies usually extend both operands, but sometimes
1324 they use a shift to extract a portion of a register. */
1325 if ((lcode == SIGN_EXTEND
1326 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1327 && (rcode == SIGN_EXTEND
1328 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1330 machine_mode lmode = GET_MODE (lhs);
1331 machine_mode rmode = GET_MODE (rhs);
1332 int bits;
1334 if (lcode == ASHIFTRT)
1335 /* Number of bits not shifted off the end. */
1336 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1337 else /* lcode == SIGN_EXTEND */
1338 /* Size of inner mode. */
1339 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1341 if (rcode == ASHIFTRT)
1342 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1343 else /* rcode == SIGN_EXTEND */
1344 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1346      /* We can only widen multiplies if the result is mathematically
1347 equivalent. I.e. if overflow was impossible. */
1348 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1349 return simplify_gen_binary
1350 (MULT, mode,
1351 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1352 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1356 /* Check for a sign extension of a subreg of a promoted
1357 variable, where the promotion is sign-extended, and the
1358 target mode is the same as the variable's promotion. */
1359 if (GET_CODE (op) == SUBREG
1360 && SUBREG_PROMOTED_VAR_P (op)
1361 && SUBREG_PROMOTED_SIGNED_P (op)
1362 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1364 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1365 if (temp)
1366 return temp;
1369 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1370 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1371 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1373 gcc_assert (GET_MODE_PRECISION (mode)
1374 > GET_MODE_PRECISION (GET_MODE (op)));
1375 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1376 GET_MODE (XEXP (op, 0)));
1379 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1380 is (sign_extend:M (subreg:O <X>)) if there is mode with
1381 GET_MODE_BITSIZE (N) - I bits.
1382 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1383 is similarly (zero_extend:M (subreg:O <X>)). */
1384 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1385 && GET_CODE (XEXP (op, 0)) == ASHIFT
1386 && CONST_INT_P (XEXP (op, 1))
1387 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1388 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1390 machine_mode tmode
1391 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1392 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1393 gcc_assert (GET_MODE_BITSIZE (mode)
1394 > GET_MODE_BITSIZE (GET_MODE (op)));
1395 if (tmode != BLKmode)
1397 rtx inner =
1398 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1399 if (inner)
1400 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1401 ? SIGN_EXTEND : ZERO_EXTEND,
1402 mode, inner, tmode);
1406 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1407 /* As we do not know which address space the pointer is referring to,
1408 we can do this only if the target does not support different pointer
1409 or address modes depending on the address space. */
1410 if (target_default_pointer_address_modes_p ()
1411 && ! POINTERS_EXTEND_UNSIGNED
1412 && mode == Pmode && GET_MODE (op) == ptr_mode
1413 && (CONSTANT_P (op)
1414 || (GET_CODE (op) == SUBREG
1415 && REG_P (SUBREG_REG (op))
1416 && REG_POINTER (SUBREG_REG (op))
1417 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1418 return convert_memory_address (Pmode, op);
1419 #endif
1420 break;
1422 case ZERO_EXTEND:
1423 /* Check for a zero extension of a subreg of a promoted
1424 variable, where the promotion is zero-extended, and the
1425 target mode is the same as the variable's promotion. */
1426 if (GET_CODE (op) == SUBREG
1427 && SUBREG_PROMOTED_VAR_P (op)
1428 && SUBREG_PROMOTED_UNSIGNED_P (op)
1429 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1431 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1432 if (temp)
1433 return temp;
1436 /* Extending a widening multiplication should be canonicalized to
1437 a wider widening multiplication. */
1438 if (GET_CODE (op) == MULT)
1440 rtx lhs = XEXP (op, 0);
1441 rtx rhs = XEXP (op, 1);
1442 enum rtx_code lcode = GET_CODE (lhs);
1443 enum rtx_code rcode = GET_CODE (rhs);
1445 /* Widening multiplies usually extend both operands, but sometimes
1446 they use a shift to extract a portion of a register. */
1447 if ((lcode == ZERO_EXTEND
1448 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1449 && (rcode == ZERO_EXTEND
1450 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1452 machine_mode lmode = GET_MODE (lhs);
1453 machine_mode rmode = GET_MODE (rhs);
1454 int bits;
1456 if (lcode == LSHIFTRT)
1457 /* Number of bits not shifted off the end. */
1458 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1459 else /* lcode == ZERO_EXTEND */
1460 /* Size of inner mode. */
1461 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1463 if (rcode == LSHIFTRT)
1464 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1465 else /* rcode == ZERO_EXTEND */
1466 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1468      /* We can only widen multiplies if the result is mathematically
1469 equivalent. I.e. if overflow was impossible. */
1470 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1471 return simplify_gen_binary
1472 (MULT, mode,
1473 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1474 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1478 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1479 if (GET_CODE (op) == ZERO_EXTEND)
1480 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1481 GET_MODE (XEXP (op, 0)));
1483 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1484 is (zero_extend:M (subreg:O <X>)) if there is mode with
1485 GET_MODE_PRECISION (N) - I bits. */
1486 if (GET_CODE (op) == LSHIFTRT
1487 && GET_CODE (XEXP (op, 0)) == ASHIFT
1488 && CONST_INT_P (XEXP (op, 1))
1489 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1490 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1492 machine_mode tmode
1493 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1494 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1495 if (tmode != BLKmode)
1497 rtx inner =
1498 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1499 if (inner)
1500 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1504 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1505 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1506 of mode N. E.g.
1507 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1508 (and:SI (reg:SI) (const_int 63)). */
1509 if (GET_CODE (op) == SUBREG
1510 && GET_MODE_PRECISION (GET_MODE (op))
1511 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1512 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1513 <= HOST_BITS_PER_WIDE_INT
1514 && GET_MODE_PRECISION (mode)
1515 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1516 && subreg_lowpart_p (op)
1517 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1518 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1520 if (GET_MODE_PRECISION (mode)
1521 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1522 return SUBREG_REG (op);
1523 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1524 GET_MODE (SUBREG_REG (op)));
1527 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1528 /* As we do not know which address space the pointer is referring to,
1529 we can do this only if the target does not support different pointer
1530 or address modes depending on the address space. */
1531 if (target_default_pointer_address_modes_p ()
1532 && POINTERS_EXTEND_UNSIGNED > 0
1533 && mode == Pmode && GET_MODE (op) == ptr_mode
1534 && (CONSTANT_P (op)
1535 || (GET_CODE (op) == SUBREG
1536 && REG_P (SUBREG_REG (op))
1537 && REG_POINTER (SUBREG_REG (op))
1538 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1539 return convert_memory_address (Pmode, op);
1540 #endif
1541 break;
1543 default:
1544 break;
1547 return 0;
1550 /* Try to compute the value of a unary operation CODE whose output mode is to
1551 be MODE with input operand OP whose mode was originally OP_MODE.
1552 Return zero if the value cannot be computed. */
1554 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1555 rtx op, machine_mode op_mode)
1557 unsigned int width = GET_MODE_PRECISION (mode);
1559 if (code == VEC_DUPLICATE)
1561 gcc_assert (VECTOR_MODE_P (mode));
1562 if (GET_MODE (op) != VOIDmode)
1564 if (!VECTOR_MODE_P (GET_MODE (op)))
1565 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1566 else
1567 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1568 (GET_MODE (op)));
1570 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1571 || GET_CODE (op) == CONST_VECTOR)
1573 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1574 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1575 rtvec v = rtvec_alloc (n_elts);
1576 unsigned int i;
1578 if (GET_CODE (op) != CONST_VECTOR)
1579 for (i = 0; i < n_elts; i++)
1580 RTVEC_ELT (v, i) = op;
1581 else
1583 machine_mode inmode = GET_MODE (op);
1584 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1585 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1587 gcc_assert (in_n_elts < n_elts);
1588 gcc_assert ((n_elts % in_n_elts) == 0);
1589 for (i = 0; i < n_elts; i++)
1590 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1592 return gen_rtx_CONST_VECTOR (mode, v);
1596 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1598 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1599 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1600 machine_mode opmode = GET_MODE (op);
1601 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1602 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1603 rtvec v = rtvec_alloc (n_elts);
1604 unsigned int i;
1606 gcc_assert (op_n_elts == n_elts);
1607 for (i = 0; i < n_elts; i++)
1609 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1610 CONST_VECTOR_ELT (op, i),
1611 GET_MODE_INNER (opmode));
1612 if (!x)
1613 return 0;
1614 RTVEC_ELT (v, i) = x;
1616 return gen_rtx_CONST_VECTOR (mode, v);
1619 /* The order of these tests is critical so that, for example, we don't
1620 check the wrong mode (input vs. output) for a conversion operation,
1621 such as FIX. At some point, this should be simplified. */
1623 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1625 REAL_VALUE_TYPE d;
1627 if (op_mode == VOIDmode)
1629 /* CONST_INT have VOIDmode as the mode. We assume that all
 1630      the bits of the constant are significant, though this is
1631 a dangerous assumption as many times CONST_INTs are
1632 created and used with garbage in the bits outside of the
1633 precision of the implied mode of the const_int. */
1634 op_mode = MAX_MODE_INT;
1637 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1638 d = real_value_truncate (mode, d);
1639 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1641 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1643 REAL_VALUE_TYPE d;
1645 if (op_mode == VOIDmode)
1647 /* CONST_INT have VOIDmode as the mode. We assume that all
 1648      the bits of the constant are significant, though this is
1649 a dangerous assumption as many times CONST_INTs are
1650 created and used with garbage in the bits outside of the
1651 precision of the implied mode of the const_int. */
1652 op_mode = MAX_MODE_INT;
1655 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1656 d = real_value_truncate (mode, d);
1657 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1660 if (CONST_SCALAR_INT_P (op) && width > 0)
1662 wide_int result;
1663 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1664 rtx_mode_t op0 = std::make_pair (op, imode);
1665 int int_value;
1667 #if TARGET_SUPPORTS_WIDE_INT == 0
1668 /* This assert keeps the simplification from producing a result
1669 that cannot be represented in a CONST_DOUBLE but a lot of
1670 upstream callers expect that this function never fails to
 1671      simplify something and so if you added this to the test
 1672      above, the code would die later anyway.  If this assert
1673 happens, you just need to make the port support wide int. */
1674 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1675 #endif
1677 switch (code)
1679 case NOT:
1680 result = wi::bit_not (op0);
1681 break;
1683 case NEG:
1684 result = wi::neg (op0);
1685 break;
1687 case ABS:
1688 result = wi::abs (op0);
1689 break;
1691 case FFS:
1692 result = wi::shwi (wi::ffs (op0), mode);
1693 break;
1695 case CLZ:
1696 if (wi::ne_p (op0, 0))
1697 int_value = wi::clz (op0);
1698 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1699 int_value = GET_MODE_PRECISION (mode);
1700 result = wi::shwi (int_value, mode);
1701 break;
1703 case CLRSB:
1704 result = wi::shwi (wi::clrsb (op0), mode);
1705 break;
1707 case CTZ:
1708 if (wi::ne_p (op0, 0))
1709 int_value = wi::ctz (op0);
1710 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1711 int_value = GET_MODE_PRECISION (mode);
1712 result = wi::shwi (int_value, mode);
1713 break;
1715 case POPCOUNT:
1716 result = wi::shwi (wi::popcount (op0), mode);
1717 break;
1719 case PARITY:
1720 result = wi::shwi (wi::parity (op0), mode);
1721 break;
1723 case BSWAP:
1724 result = wide_int (op0).bswap ();
1725 break;
1727 case TRUNCATE:
1728 case ZERO_EXTEND:
1729 result = wide_int::from (op0, width, UNSIGNED);
1730 break;
1732 case SIGN_EXTEND:
1733 result = wide_int::from (op0, width, SIGNED);
1734 break;
1736 case SQRT:
1737 default:
1738 return 0;
1741 return immed_wide_int_const (result, mode);
1744 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1745 && SCALAR_FLOAT_MODE_P (mode)
1746 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1748 REAL_VALUE_TYPE d;
1749 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1751 switch (code)
1753 case SQRT:
1754 return 0;
1755 case ABS:
1756 d = real_value_abs (&d);
1757 break;
1758 case NEG:
1759 d = real_value_negate (&d);
1760 break;
1761 case FLOAT_TRUNCATE:
1762 d = real_value_truncate (mode, d);
1763 break;
1764 case FLOAT_EXTEND:
1765 /* All this does is change the mode, unless changing
1766 mode class. */
1767 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1768 real_convert (&d, mode, &d);
1769 break;
1770 case FIX:
1771 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1772 break;
1773 case NOT:
1775 long tmp[4];
1776 int i;
1778 real_to_target (tmp, &d, GET_MODE (op));
1779 for (i = 0; i < 4; i++)
1780 tmp[i] = ~tmp[i];
1781 real_from_target (&d, tmp, mode);
1782 break;
1784 default:
1785 gcc_unreachable ();
1787 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1789 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1790 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1791 && GET_MODE_CLASS (mode) == MODE_INT
1792 && width > 0)
1794 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1795 operators are intentionally left unspecified (to ease implementation
1796 by target backends), for consistency, this routine implements the
1797 same semantics for constant folding as used by the middle-end. */
1799 /* This was formerly used only for non-IEEE float.
1800 eggert@twinsun.com says it is safe for IEEE also. */
1801 REAL_VALUE_TYPE x, t;
1802 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1803 wide_int wmax, wmin;
1804 /* This is part of the abi to real_to_integer, but we check
1805 things before making this call. */
1806 bool fail;
1808 switch (code)
1810 case FIX:
1811 if (REAL_VALUE_ISNAN (x))
1812 return const0_rtx;
1814 /* Test against the signed upper bound. */
1815 wmax = wi::max_value (width, SIGNED);
1816 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1817 if (REAL_VALUES_LESS (t, x))
1818 return immed_wide_int_const (wmax, mode);
1820 /* Test against the signed lower bound. */
1821 wmin = wi::min_value (width, SIGNED);
1822 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1823 if (REAL_VALUES_LESS (x, t))
1824 return immed_wide_int_const (wmin, mode);
1826 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1827 break;
1829 case UNSIGNED_FIX:
1830 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1831 return const0_rtx;
1833 /* Test against the unsigned upper bound. */
1834 wmax = wi::max_value (width, UNSIGNED);
1835 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1836 if (REAL_VALUES_LESS (t, x))
1837 return immed_wide_int_const (wmax, mode);
1839 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1840 mode);
1841 break;
1843 default:
1844 gcc_unreachable ();
1848 return NULL_RTX;
1851 /* Subroutine of simplify_binary_operation to simplify a binary operation
1852 CODE that can commute with byte swapping, with result mode MODE and
1853 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1854 Return zero if no simplification or canonicalization is possible. */
1856 static rtx
1857 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1858 rtx op0, rtx op1)
1860 rtx tem;
1862 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 being C1 byte-swapped. */
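/* For example, (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))).  */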
1863 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1865 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1866 simplify_gen_unary (BSWAP, mode, op1, mode));
1867 return simplify_gen_unary (BSWAP, mode, tem, mode);
1870 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1871 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1873 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1874 return simplify_gen_unary (BSWAP, mode, tem, mode);
1877 return NULL_RTX;
1880 /* Subroutine of simplify_binary_operation to simplify a commutative,
1881 associative binary operation CODE with result mode MODE, operating
1882 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1883 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1884 canonicalization is possible. */
1886 static rtx
1887 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1888 rtx op0, rtx op1)
1890 rtx tem;
1892 /* Linearize the operator to the left. */
1893 if (GET_CODE (op1) == code)
1895 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
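/* For example, (plus (plus a b) (plus c d)) is rewritten as
   (plus (plus (plus a b) c) d), giving later passes one left-associated
   chain to work with.  */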
1896 if (GET_CODE (op0) == code)
1898 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1899 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1902 /* "a op (b op c)" becomes "(b op c) op a". */
1903 if (! swap_commutative_operands_p (op1, op0))
1904 return simplify_gen_binary (code, mode, op1, op0);
1906 tem = op0;
1907 op0 = op1;
1908 op1 = tem;
1911 if (GET_CODE (op0) == code)
1913 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1914 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1916 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1917 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1920 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1921 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1922 if (tem != 0)
1923 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1925 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1926 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1927 if (tem != 0)
1928 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1931 return 0;
1935 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1936 and OP1. Return 0 if no simplification is possible.
1938 Don't use this for relational operations such as EQ or LT.
1939 Use simplify_relational_operation instead. */
1941 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1942 rtx op0, rtx op1)
1944 rtx trueop0, trueop1;
1945 rtx tem;
1947 /* Relational operations don't work here. We must know the mode
1948 of the operands in order to do the comparison correctly.
1949 Assuming a full word can give incorrect results.
1950 Consider comparing 128 with -128 in QImode. */
1951 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1952 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1954 /* Make sure the constant is second. */
1955 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1956 && swap_commutative_operands_p (op0, op1))
1958 tem = op0, op0 = op1, op1 = tem;
1961 trueop0 = avoid_constant_pool_reference (op0);
1962 trueop1 = avoid_constant_pool_reference (op1);
1964 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1965 if (tem)
1966 return tem;
1967 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1970 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1971 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1972 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1973 actual constants. */
1975 static rtx
1976 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1977 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1979 rtx tem, reversed, opleft, opright;
1980 HOST_WIDE_INT val;
1981 unsigned int width = GET_MODE_PRECISION (mode);
1983 /* Even if we can't compute a constant result,
1984 there are some cases worth simplifying. */
1986 switch (code)
1988 case PLUS:
1989 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1990 when x is NaN, infinite, or finite and nonzero. They aren't
1991 when x is -0 and the rounding mode is not towards -infinity,
1992 since (-0) + 0 is then 0. */
1993 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1994 return op0;
1996 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1997 transformations are safe even for IEEE. */
1998 if (GET_CODE (op0) == NEG)
1999 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2000 else if (GET_CODE (op1) == NEG)
2001 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2003 /* (~a) + 1 -> -a */
2004 if (INTEGRAL_MODE_P (mode)
2005 && GET_CODE (op0) == NOT
2006 && trueop1 == const1_rtx)
2007 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2009 /* Handle both-operands-constant cases. We can only add
2010 CONST_INTs to constants since the sum of relocatable symbols
2011 can't be handled by most assemblers. Don't add CONST_INT
2012 to CONST_INT since overflow won't be computed properly if wider
2013 than HOST_BITS_PER_WIDE_INT. */
2015 if ((GET_CODE (op0) == CONST
2016 || GET_CODE (op0) == SYMBOL_REF
2017 || GET_CODE (op0) == LABEL_REF)
2018 && CONST_INT_P (op1))
2019 return plus_constant (mode, op0, INTVAL (op1));
2020 else if ((GET_CODE (op1) == CONST
2021 || GET_CODE (op1) == SYMBOL_REF
2022 || GET_CODE (op1) == LABEL_REF)
2023 && CONST_INT_P (op0))
2024 return plus_constant (mode, op1, INTVAL (op0));
2026 /* See if this is something like X * C - X or vice versa or
2027 if the multiplication is written as a shift. If so, we can
2028 distribute and make a new multiply, shift, or maybe just
2029 have X (if C is 2 in the example above). But don't make
2030 something more expensive than we had before. */
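/* For example, (plus (mult x (const_int 3)) x) can become
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) can
   become (mult x (const_int 5)), provided the new form is no more
   expensive than the original.  */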
2032 if (SCALAR_INT_MODE_P (mode))
2034 rtx lhs = op0, rhs = op1;
2036 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2037 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2039 if (GET_CODE (lhs) == NEG)
2041 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2042 lhs = XEXP (lhs, 0);
2044 else if (GET_CODE (lhs) == MULT
2045 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2047 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2048 lhs = XEXP (lhs, 0);
2050 else if (GET_CODE (lhs) == ASHIFT
2051 && CONST_INT_P (XEXP (lhs, 1))
2052 && INTVAL (XEXP (lhs, 1)) >= 0
2053 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2055 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2056 GET_MODE_PRECISION (mode));
2057 lhs = XEXP (lhs, 0);
2060 if (GET_CODE (rhs) == NEG)
2062 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2063 rhs = XEXP (rhs, 0);
2065 else if (GET_CODE (rhs) == MULT
2066 && CONST_INT_P (XEXP (rhs, 1)))
2068 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2069 rhs = XEXP (rhs, 0);
2071 else if (GET_CODE (rhs) == ASHIFT
2072 && CONST_INT_P (XEXP (rhs, 1))
2073 && INTVAL (XEXP (rhs, 1)) >= 0
2074 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2076 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2077 GET_MODE_PRECISION (mode));
2078 rhs = XEXP (rhs, 0);
2081 if (rtx_equal_p (lhs, rhs))
2083 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2084 rtx coeff;
2085 bool speed = optimize_function_for_speed_p (cfun);
2087 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2089 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2090 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2091 ? tem : 0;
2095 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
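/* For example, in SImode (plus (xor x C1) 0x80000000) becomes
   (xor x (C1 ^ 0x80000000)); adding the sign bit and XORing it agree
   because the carry out of the top bit is discarded.  */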
2096 if (CONST_SCALAR_INT_P (op1)
2097 && GET_CODE (op0) == XOR
2098 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2099 && mode_signbit_p (mode, op1))
2100 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2101 simplify_gen_binary (XOR, mode, op1,
2102 XEXP (op0, 1)));
2104 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2105 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2106 && GET_CODE (op0) == MULT
2107 && GET_CODE (XEXP (op0, 0)) == NEG)
2109 rtx in1, in2;
2111 in1 = XEXP (XEXP (op0, 0), 0);
2112 in2 = XEXP (op0, 1);
2113 return simplify_gen_binary (MINUS, mode, op1,
2114 simplify_gen_binary (MULT, mode,
2115 in1, in2));
2118 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2119 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2120 is 1. */
2121 if (COMPARISON_P (op0)
2122 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2123 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2124 && (reversed = reversed_comparison (op0, mode)))
2125 return
2126 simplify_gen_unary (NEG, mode, reversed, mode);
2128 /* If one of the operands is a PLUS or a MINUS, see if we can
2129 simplify this by the associative law.
2130 Don't use the associative law for floating point.
2131 The inaccuracy makes it nonassociative,
2132 and subtle programs can break if operations are associated. */
2134 if (INTEGRAL_MODE_P (mode)
2135 && (plus_minus_operand_p (op0)
2136 || plus_minus_operand_p (op1))
2137 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2138 return tem;
2140 /* Reassociate floating point addition only when the user
2141 specifies associative math operations. */
2142 if (FLOAT_MODE_P (mode)
2143 && flag_associative_math)
2145 tem = simplify_associative_operation (code, mode, op0, op1);
2146 if (tem)
2147 return tem;
2149 break;
2151 case COMPARE:
2152 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2153 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2154 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2155 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2157 rtx xop00 = XEXP (op0, 0);
2158 rtx xop10 = XEXP (op1, 0);
2160 #ifdef HAVE_cc0
2161 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2162 #else
2163 if (REG_P (xop00) && REG_P (xop10)
2164 && GET_MODE (xop00) == GET_MODE (xop10)
2165 && REGNO (xop00) == REGNO (xop10)
2166 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2167 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2168 #endif
2169 return xop00;
2171 break;
2173 case MINUS:
2174 /* We can't assume x-x is 0 even with non-IEEE floating point,
2175 but since it is zero except in very strange circumstances, we
2176 will treat it as zero with -ffinite-math-only. */
2177 if (rtx_equal_p (trueop0, trueop1)
2178 && ! side_effects_p (op0)
2179 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2180 return CONST0_RTX (mode);
2182 /* Change subtraction from zero into negation. (0 - x) is the
2183 same as -x when x is NaN, infinite, or finite and nonzero.
2184 But if the mode has signed zeros, and does not round towards
2185 -infinity, then 0 - 0 is 0, not -0. */
2186 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2187 return simplify_gen_unary (NEG, mode, op1, mode);
2189 /* (-1 - a) is ~a. */
2190 if (trueop0 == constm1_rtx)
2191 return simplify_gen_unary (NOT, mode, op1, mode);
2193 /* Subtracting 0 has no effect unless the mode has signed zeros
2194 and supports rounding towards -infinity. In such a case,
2195 0 - 0 is -0. */
2196 if (!(HONOR_SIGNED_ZEROS (mode)
2197 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2198 && trueop1 == CONST0_RTX (mode))
2199 return op0;
2201 /* See if this is something like X * C - X or vice versa or
2202 if the multiplication is written as a shift. If so, we can
2203 distribute and make a new multiply, shift, or maybe just
2204 have X (if C is 2 in the example above). But don't make
2205 something more expensive than we had before. */
2207 if (SCALAR_INT_MODE_P (mode))
2209 rtx lhs = op0, rhs = op1;
2211 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2212 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2214 if (GET_CODE (lhs) == NEG)
2216 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2217 lhs = XEXP (lhs, 0);
2219 else if (GET_CODE (lhs) == MULT
2220 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2222 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2223 lhs = XEXP (lhs, 0);
2225 else if (GET_CODE (lhs) == ASHIFT
2226 && CONST_INT_P (XEXP (lhs, 1))
2227 && INTVAL (XEXP (lhs, 1)) >= 0
2228 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2230 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2231 GET_MODE_PRECISION (mode));
2232 lhs = XEXP (lhs, 0);
2235 if (GET_CODE (rhs) == NEG)
2237 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2238 rhs = XEXP (rhs, 0);
2240 else if (GET_CODE (rhs) == MULT
2241 && CONST_INT_P (XEXP (rhs, 1)))
2243 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2244 rhs = XEXP (rhs, 0);
2246 else if (GET_CODE (rhs) == ASHIFT
2247 && CONST_INT_P (XEXP (rhs, 1))
2248 && INTVAL (XEXP (rhs, 1)) >= 0
2249 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2251 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2252 GET_MODE_PRECISION (mode));
2253 negcoeff1 = -negcoeff1;
2254 rhs = XEXP (rhs, 0);
2257 if (rtx_equal_p (lhs, rhs))
2259 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2260 rtx coeff;
2261 bool speed = optimize_function_for_speed_p (cfun);
2263 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2265 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2266 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2267 ? tem : 0;
2271 /* (a - (-b)) -> (a + b). True even for IEEE. */
2272 if (GET_CODE (op1) == NEG)
2273 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2275 /* (-x - c) may be simplified as (-c - x). */
2276 if (GET_CODE (op0) == NEG
2277 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2279 tem = simplify_unary_operation (NEG, mode, op1, mode);
2280 if (tem)
2281 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2284 /* Don't let a relocatable value get a negative coeff. */
2285 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2286 return simplify_gen_binary (PLUS, mode,
2287 op0,
2288 neg_const_int (mode, op1));
2290 /* (x - (x & y)) -> (x & ~y) */
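/* For example, (minus x (and x (const_int 7))) becomes
   (and x (const_int -8)), clearing the low three bits of x.  */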
2291 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2293 if (rtx_equal_p (op0, XEXP (op1, 0)))
2295 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2296 GET_MODE (XEXP (op1, 1)));
2297 return simplify_gen_binary (AND, mode, op0, tem);
2299 if (rtx_equal_p (op0, XEXP (op1, 1)))
2301 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2302 GET_MODE (XEXP (op1, 0)));
2303 return simplify_gen_binary (AND, mode, op0, tem);
2307 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2308 by reversing the comparison code if valid. */
2309 if (STORE_FLAG_VALUE == 1
2310 && trueop0 == const1_rtx
2311 && COMPARISON_P (op1)
2312 && (reversed = reversed_comparison (op1, mode)))
2313 return reversed;
2315 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2316 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2317 && GET_CODE (op1) == MULT
2318 && GET_CODE (XEXP (op1, 0)) == NEG)
2320 rtx in1, in2;
2322 in1 = XEXP (XEXP (op1, 0), 0);
2323 in2 = XEXP (op1, 1);
2324 return simplify_gen_binary (PLUS, mode,
2325 simplify_gen_binary (MULT, mode,
2326 in1, in2),
2327 op0);
2330 /* Canonicalize (minus (neg A) (mult B C)) to
2331 (minus (mult (neg B) C) A). */
2332 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2333 && GET_CODE (op1) == MULT
2334 && GET_CODE (op0) == NEG)
2336 rtx in1, in2;
2338 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2339 in2 = XEXP (op1, 1);
2340 return simplify_gen_binary (MINUS, mode,
2341 simplify_gen_binary (MULT, mode,
2342 in1, in2),
2343 XEXP (op0, 0));
2346 /* If one of the operands is a PLUS or a MINUS, see if we can
2347 simplify this by the associative law. This will, for example,
2348 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2349 Don't use the associative law for floating point.
2350 The inaccuracy makes it nonassociative,
2351 and subtle programs can break if operations are associated. */
2353 if (INTEGRAL_MODE_P (mode)
2354 && (plus_minus_operand_p (op0)
2355 || plus_minus_operand_p (op1))
2356 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2357 return tem;
2358 break;
2360 case MULT:
2361 if (trueop1 == constm1_rtx)
2362 return simplify_gen_unary (NEG, mode, op0, mode);
2364 if (GET_CODE (op0) == NEG)
2366 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2367 /* If op1 is a MULT as well and simplify_unary_operation
2368 just moved the NEG to the second operand, simplify_gen_binary
2369 below could, through simplify_associative_operation, move
2370 the NEG around again and recurse endlessly. */
2371 if (temp
2372 && GET_CODE (op1) == MULT
2373 && GET_CODE (temp) == MULT
2374 && XEXP (op1, 0) == XEXP (temp, 0)
2375 && GET_CODE (XEXP (temp, 1)) == NEG
2376 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2377 temp = NULL_RTX;
2378 if (temp)
2379 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2381 if (GET_CODE (op1) == NEG)
2383 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2384 /* If op0 is a MULT as well and simplify_unary_operation
2385 just moved the NEG to the second operand, simplify_gen_binary
2386 below could, through simplify_associative_operation, move
2387 the NEG around again and recurse endlessly. */
2388 if (temp
2389 && GET_CODE (op0) == MULT
2390 && GET_CODE (temp) == MULT
2391 && XEXP (op0, 0) == XEXP (temp, 0)
2392 && GET_CODE (XEXP (temp, 1)) == NEG
2393 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2394 temp = NULL_RTX;
2395 if (temp)
2396 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2399 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2400 x is NaN, since x * 0 is then also NaN. Nor is it valid
2401 when the mode has signed zeros, since multiplying a negative
2402 number by 0 will give -0, not 0. */
2403 if (!HONOR_NANS (mode)
2404 && !HONOR_SIGNED_ZEROS (mode)
2405 && trueop1 == CONST0_RTX (mode)
2406 && ! side_effects_p (op0))
2407 return op1;
2409 /* In IEEE floating point, x*1 is not equivalent to x for
2410 signalling NaNs. */
2411 if (!HONOR_SNANS (mode)
2412 && trueop1 == CONST1_RTX (mode))
2413 return op0;
2415 /* Convert multiply by constant power of two into shift. */
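/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */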
2416 if (CONST_SCALAR_INT_P (trueop1))
2418 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2419 if (val >= 0)
2420 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2423 /* x*2 is x+x and x*(-1) is -x */
2424 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2425 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2426 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2427 && GET_MODE (op0) == mode)
2429 REAL_VALUE_TYPE d;
2430 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2432 if (REAL_VALUES_EQUAL (d, dconst2))
2433 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2435 if (!HONOR_SNANS (mode)
2436 && REAL_VALUES_EQUAL (d, dconstm1))
2437 return simplify_gen_unary (NEG, mode, op0, mode);
2440 /* Optimize -x * -x as x * x. */
2441 if (FLOAT_MODE_P (mode)
2442 && GET_CODE (op0) == NEG
2443 && GET_CODE (op1) == NEG
2444 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2445 && !side_effects_p (XEXP (op0, 0)))
2446 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2448 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2449 if (SCALAR_FLOAT_MODE_P (mode)
2450 && GET_CODE (op0) == ABS
2451 && GET_CODE (op1) == ABS
2452 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2453 && !side_effects_p (XEXP (op0, 0)))
2454 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2456 /* Reassociate multiplication, but for floating point MULTs
2457 only when the user specifies unsafe math optimizations. */
2458 if (! FLOAT_MODE_P (mode)
2459 || flag_unsafe_math_optimizations)
2461 tem = simplify_associative_operation (code, mode, op0, op1);
2462 if (tem)
2463 return tem;
2465 break;
2467 case IOR:
2468 if (trueop1 == CONST0_RTX (mode))
2469 return op0;
2470 if (INTEGRAL_MODE_P (mode)
2471 && trueop1 == CONSTM1_RTX (mode)
2472 && !side_effects_p (op0))
2473 return op1;
2474 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2475 return op0;
2476 /* A | (~A) -> -1 */
2477 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2478 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2479 && ! side_effects_p (op0)
2480 && SCALAR_INT_MODE_P (mode))
2481 return constm1_rtx;
2483 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2484 if (CONST_INT_P (op1)
2485 && HWI_COMPUTABLE_MODE_P (mode)
2486 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2487 && !side_effects_p (op0))
2488 return op1;
2490 /* Canonicalize (X & C1) | C2. */
2491 if (GET_CODE (op0) == AND
2492 && CONST_INT_P (trueop1)
2493 && CONST_INT_P (XEXP (op0, 1)))
2495 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2496 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2497 HOST_WIDE_INT c2 = INTVAL (trueop1);
2499 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2500 if ((c1 & c2) == c1
2501 && !side_effects_p (XEXP (op0, 0)))
2502 return trueop1;
2504 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2505 if (((c1|c2) & mask) == mask)
2506 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2508 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2509 if (((c1 & ~c2) & mask) != (c1 & mask))
2511 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2512 gen_int_mode (c1 & ~c2, mode));
2513 return simplify_gen_binary (IOR, mode, tem, op1);
2517 /* Convert (A & B) | A to A. */
2518 if (GET_CODE (op0) == AND
2519 && (rtx_equal_p (XEXP (op0, 0), op1)
2520 || rtx_equal_p (XEXP (op0, 1), op1))
2521 && ! side_effects_p (XEXP (op0, 0))
2522 && ! side_effects_p (XEXP (op0, 1)))
2523 return op1;
2525 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2526 mode size to (rotate A CX). */
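/* For example, in SImode (ior (ashift x (const_int 24))
   (lshiftrt x (const_int 8))) becomes (rotate x (const_int 24)).  */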
2528 if (GET_CODE (op1) == ASHIFT
2529 || GET_CODE (op1) == SUBREG)
2531 opleft = op1;
2532 opright = op0;
2534 else
2536 opright = op1;
2537 opleft = op0;
2540 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2541 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2542 && CONST_INT_P (XEXP (opleft, 1))
2543 && CONST_INT_P (XEXP (opright, 1))
2544 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2545 == GET_MODE_PRECISION (mode)))
2546 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2548 /* Same, but for ashift that has been "simplified" to a wider mode
2549 by simplify_shift_const. */
2551 if (GET_CODE (opleft) == SUBREG
2552 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2553 && GET_CODE (opright) == LSHIFTRT
2554 && GET_CODE (XEXP (opright, 0)) == SUBREG
2555 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2556 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2557 && (GET_MODE_SIZE (GET_MODE (opleft))
2558 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2559 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2560 SUBREG_REG (XEXP (opright, 0)))
2561 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2562 && CONST_INT_P (XEXP (opright, 1))
2563 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2564 == GET_MODE_PRECISION (mode)))
2565 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2566 XEXP (SUBREG_REG (opleft), 1));
2568 /* If we have (ior (and X C1) C2), simplify this by making
2569 C1 as small as possible if C1 actually changes. */
2570 if (CONST_INT_P (op1)
2571 && (HWI_COMPUTABLE_MODE_P (mode)
2572 || INTVAL (op1) > 0)
2573 && GET_CODE (op0) == AND
2574 && CONST_INT_P (XEXP (op0, 1))
2575 && CONST_INT_P (op1)
2576 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2578 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2579 gen_int_mode (UINTVAL (XEXP (op0, 1))
2580 & ~UINTVAL (op1),
2581 mode));
2582 return simplify_gen_binary (IOR, mode, tmp, op1);
2585 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2586 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2587 the PLUS does not affect any of the bits in OP1: then we can do
2588 the IOR as a PLUS and we can associate. This is valid if OP1
2589 can be safely shifted left C bits. */
2590 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2591 && GET_CODE (XEXP (op0, 0)) == PLUS
2592 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2593 && CONST_INT_P (XEXP (op0, 1))
2594 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2596 int count = INTVAL (XEXP (op0, 1));
2597 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2599 if (mask >> count == INTVAL (trueop1)
2600 && trunc_int_for_mode (mask, mode) == mask
2601 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2602 return simplify_gen_binary (ASHIFTRT, mode,
2603 plus_constant (mode, XEXP (op0, 0),
2604 mask),
2605 XEXP (op0, 1));
2608 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2609 if (tem)
2610 return tem;
2612 tem = simplify_associative_operation (code, mode, op0, op1);
2613 if (tem)
2614 return tem;
2615 break;
2617 case XOR:
2618 if (trueop1 == CONST0_RTX (mode))
2619 return op0;
2620 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2621 return simplify_gen_unary (NOT, mode, op0, mode);
2622 if (rtx_equal_p (trueop0, trueop1)
2623 && ! side_effects_p (op0)
2624 && GET_MODE_CLASS (mode) != MODE_CC)
2625 return CONST0_RTX (mode);
2627 /* Canonicalize XOR of the most significant bit to PLUS. */
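/* For example, in SImode (xor x 0x80000000) is rewritten as
   (plus x 0x80000000); the two are equivalent because the carry out of
   the sign bit is discarded.  */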
2628 if (CONST_SCALAR_INT_P (op1)
2629 && mode_signbit_p (mode, op1))
2630 return simplify_gen_binary (PLUS, mode, op0, op1);
2631 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2632 if (CONST_SCALAR_INT_P (op1)
2633 && GET_CODE (op0) == PLUS
2634 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2635 && mode_signbit_p (mode, XEXP (op0, 1)))
2636 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2637 simplify_gen_binary (XOR, mode, op1,
2638 XEXP (op0, 1)));
2640 /* If we are XORing two things that have no bits in common,
2641 convert them into an IOR. This helps to detect rotations
2642 encoded via shifts and ORs, and possibly other simplifications. */
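/* For example, (xor (and x (const_int 15)) (const_int 16)) has no
   overlapping nonzero bits, so it becomes
   (ior (and x (const_int 15)) (const_int 16)).  */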
2644 if (HWI_COMPUTABLE_MODE_P (mode)
2645 && (nonzero_bits (op0, mode)
2646 & nonzero_bits (op1, mode)) == 0)
2647 return (simplify_gen_binary (IOR, mode, op0, op1));
2649 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2650 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2651 (NOT y). */
2653 int num_negated = 0;
2655 if (GET_CODE (op0) == NOT)
2656 num_negated++, op0 = XEXP (op0, 0);
2657 if (GET_CODE (op1) == NOT)
2658 num_negated++, op1 = XEXP (op1, 0);
2660 if (num_negated == 2)
2661 return simplify_gen_binary (XOR, mode, op0, op1);
2662 else if (num_negated == 1)
2663 return simplify_gen_unary (NOT, mode,
2664 simplify_gen_binary (XOR, mode, op0, op1),
2665 mode);
2668 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2669 correspond to a machine insn or result in further simplifications
2670 if B is a constant. */
2672 if (GET_CODE (op0) == AND
2673 && rtx_equal_p (XEXP (op0, 1), op1)
2674 && ! side_effects_p (op1))
2675 return simplify_gen_binary (AND, mode,
2676 simplify_gen_unary (NOT, mode,
2677 XEXP (op0, 0), mode),
2678 op1);
2680 else if (GET_CODE (op0) == AND
2681 && rtx_equal_p (XEXP (op0, 0), op1)
2682 && ! side_effects_p (op1))
2683 return simplify_gen_binary (AND, mode,
2684 simplify_gen_unary (NOT, mode,
2685 XEXP (op0, 1), mode),
2686 op1);
2688 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2689 we can transform like this:
2690 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2691 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2692 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2693 Attempt a few simplifications when B and C are both constants. */
2694 if (GET_CODE (op0) == AND
2695 && CONST_INT_P (op1)
2696 && CONST_INT_P (XEXP (op0, 1)))
2698 rtx a = XEXP (op0, 0);
2699 rtx b = XEXP (op0, 1);
2700 rtx c = op1;
2701 HOST_WIDE_INT bval = INTVAL (b);
2702 HOST_WIDE_INT cval = INTVAL (c);
2704 rtx na_c
2705 = simplify_binary_operation (AND, mode,
2706 simplify_gen_unary (NOT, mode, a, mode),
2707 c);
2708 if ((~cval & bval) == 0)
2710 /* Try to simplify ~A&C | ~B&C. */
2711 if (na_c != NULL_RTX)
2712 return simplify_gen_binary (IOR, mode, na_c,
2713 gen_int_mode (~bval & cval, mode));
2715 else
2717 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2718 if (na_c == const0_rtx)
2720 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2721 gen_int_mode (~cval & bval,
2722 mode));
2723 return simplify_gen_binary (IOR, mode, a_nc_b,
2724 gen_int_mode (~bval & cval,
2725 mode));
2730 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2731 comparison if STORE_FLAG_VALUE is 1. */
2732 if (STORE_FLAG_VALUE == 1
2733 && trueop1 == const1_rtx
2734 && COMPARISON_P (op0)
2735 && (reversed = reversed_comparison (op0, mode)))
2736 return reversed;
2738 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2739 is (lt foo (const_int 0)), so we can perform the above
2740 simplification if STORE_FLAG_VALUE is 1. */
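/* For example, in SImode (xor (lshiftrt x (const_int 31)) (const_int 1))
   becomes (ge x (const_int 0)).  */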
2742 if (STORE_FLAG_VALUE == 1
2743 && trueop1 == const1_rtx
2744 && GET_CODE (op0) == LSHIFTRT
2745 && CONST_INT_P (XEXP (op0, 1))
2746 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2747 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2749 /* (xor (comparison foo bar) (const_int sign-bit)) can become the
2750 reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2751 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2752 && trueop1 == const_true_rtx
2753 && COMPARISON_P (op0)
2754 && (reversed = reversed_comparison (op0, mode)))
2755 return reversed;
2757 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2758 if (tem)
2759 return tem;
2761 tem = simplify_associative_operation (code, mode, op0, op1);
2762 if (tem)
2763 return tem;
2764 break;
2766 case AND:
2767 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2768 return trueop1;
2769 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2770 return op0;
2771 if (HWI_COMPUTABLE_MODE_P (mode))
2773 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2774 HOST_WIDE_INT nzop1;
2775 if (CONST_INT_P (trueop1))
2777 HOST_WIDE_INT val1 = INTVAL (trueop1);
2778 /* If we are turning off bits already known off in OP0, we need
2779 not do an AND. */
2780 if ((nzop0 & ~val1) == 0)
2781 return op0;
2783 nzop1 = nonzero_bits (trueop1, mode);
2784 /* If we are clearing all the nonzero bits, the result is zero. */
2785 if ((nzop1 & nzop0) == 0
2786 && !side_effects_p (op0) && !side_effects_p (op1))
2787 return CONST0_RTX (mode);
2789 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2790 && GET_MODE_CLASS (mode) != MODE_CC)
2791 return op0;
2792 /* A & (~A) -> 0 */
2793 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2794 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2795 && ! side_effects_p (op0)
2796 && GET_MODE_CLASS (mode) != MODE_CC)
2797 return CONST0_RTX (mode);
2799 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2800 there are no nonzero bits of C outside of X's mode. */
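/* For example, (and:SI (sign_extend:SI (reg:QI r)) (const_int 0x7f))
   becomes (zero_extend:SI (and:QI (reg:QI r) (const_int 0x7f))).  */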
2801 if ((GET_CODE (op0) == SIGN_EXTEND
2802 || GET_CODE (op0) == ZERO_EXTEND)
2803 && CONST_INT_P (trueop1)
2804 && HWI_COMPUTABLE_MODE_P (mode)
2805 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2806 & UINTVAL (trueop1)) == 0)
2808 machine_mode imode = GET_MODE (XEXP (op0, 0));
2809 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2810 gen_int_mode (INTVAL (trueop1),
2811 imode));
2812 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2815 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2816 we might be able to further simplify the AND with X and potentially
2817 remove the truncation altogether. */
2818 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2820 rtx x = XEXP (op0, 0);
2821 machine_mode xmode = GET_MODE (x);
2822 tem = simplify_gen_binary (AND, xmode, x,
2823 gen_int_mode (INTVAL (trueop1), xmode));
2824 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2827 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2828 if (GET_CODE (op0) == IOR
2829 && CONST_INT_P (trueop1)
2830 && CONST_INT_P (XEXP (op0, 1)))
2832 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2833 return simplify_gen_binary (IOR, mode,
2834 simplify_gen_binary (AND, mode,
2835 XEXP (op0, 0), op1),
2836 gen_int_mode (tmp, mode));
2839 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2840 insn (and may simplify more). */
2841 if (GET_CODE (op0) == XOR
2842 && rtx_equal_p (XEXP (op0, 0), op1)
2843 && ! side_effects_p (op1))
2844 return simplify_gen_binary (AND, mode,
2845 simplify_gen_unary (NOT, mode,
2846 XEXP (op0, 1), mode),
2847 op1);
2849 if (GET_CODE (op0) == XOR
2850 && rtx_equal_p (XEXP (op0, 1), op1)
2851 && ! side_effects_p (op1))
2852 return simplify_gen_binary (AND, mode,
2853 simplify_gen_unary (NOT, mode,
2854 XEXP (op0, 0), mode),
2855 op1);
2857 /* Similarly for (~(A ^ B)) & A. */
2858 if (GET_CODE (op0) == NOT
2859 && GET_CODE (XEXP (op0, 0)) == XOR
2860 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2861 && ! side_effects_p (op1))
2862 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2864 if (GET_CODE (op0) == NOT
2865 && GET_CODE (XEXP (op0, 0)) == XOR
2866 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2867 && ! side_effects_p (op1))
2868 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2870 /* Convert (A | B) & A to A. */
2871 if (GET_CODE (op0) == IOR
2872 && (rtx_equal_p (XEXP (op0, 0), op1)
2873 || rtx_equal_p (XEXP (op0, 1), op1))
2874 && ! side_effects_p (XEXP (op0, 0))
2875 && ! side_effects_p (XEXP (op0, 1)))
2876 return op1;
2878 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2879 ((A & N) + B) & M -> (A + B) & M
2880 Similarly if (N & M) == 0,
2881 ((A | N) + B) & M -> (A + B) & M
2882 and for - instead of + and/or ^ instead of |.
2883 Also, if (N & M) == 0, then
2884 (A +- N) & M -> A & M. */
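/* For example, (and (plus (and a (const_int 255)) b) (const_int 15))
   simplifies to (and (plus a b) (const_int 15)); the low four bits of
   the sum depend only on the low four bits of each addend.  */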
2885 if (CONST_INT_P (trueop1)
2886 && HWI_COMPUTABLE_MODE_P (mode)
2887 && ~UINTVAL (trueop1)
2888 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2889 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2891 rtx pmop[2];
2892 int which;
2894 pmop[0] = XEXP (op0, 0);
2895 pmop[1] = XEXP (op0, 1);
2897 if (CONST_INT_P (pmop[1])
2898 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2899 return simplify_gen_binary (AND, mode, pmop[0], op1);
2901 for (which = 0; which < 2; which++)
2903 tem = pmop[which];
2904 switch (GET_CODE (tem))
2906 case AND:
2907 if (CONST_INT_P (XEXP (tem, 1))
2908 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2909 == UINTVAL (trueop1))
2910 pmop[which] = XEXP (tem, 0);
2911 break;
2912 case IOR:
2913 case XOR:
2914 if (CONST_INT_P (XEXP (tem, 1))
2915 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2916 pmop[which] = XEXP (tem, 0);
2917 break;
2918 default:
2919 break;
2923 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2925 tem = simplify_gen_binary (GET_CODE (op0), mode,
2926 pmop[0], pmop[1]);
2927 return simplify_gen_binary (code, mode, tem, op1);
2931 /* (and X (ior (not X) Y)) -> (and X Y) */
2932 if (GET_CODE (op1) == IOR
2933 && GET_CODE (XEXP (op1, 0)) == NOT
2934 && op0 == XEXP (XEXP (op1, 0), 0))
2935 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2937 /* (and (ior (not X) Y) X) -> (and X Y) */
2938 if (GET_CODE (op0) == IOR
2939 && GET_CODE (XEXP (op0, 0)) == NOT
2940 && op1 == XEXP (XEXP (op0, 0), 0))
2941 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2943 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2944 if (tem)
2945 return tem;
2947 tem = simplify_associative_operation (code, mode, op0, op1);
2948 if (tem)
2949 return tem;
2950 break;
2952 case UDIV:
2953 /* 0/x is 0 (or x&0 if x has side-effects). */
2954 if (trueop0 == CONST0_RTX (mode))
2956 if (side_effects_p (op1))
2957 return simplify_gen_binary (AND, mode, op1, trueop0);
2958 return trueop0;
2960 /* x/1 is x. */
2961 if (trueop1 == CONST1_RTX (mode))
2963 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2964 if (tem)
2965 return tem;
2967 /* Convert divide by power of two into shift. */
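/* For example, (udiv x (const_int 16)) becomes
   (lshiftrt x (const_int 4)).  */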
2968 if (CONST_INT_P (trueop1)
2969 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2970 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2971 break;
2973 case DIV:
2974 /* Handle floating point and integers separately. */
2975 if (SCALAR_FLOAT_MODE_P (mode))
2977 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2978 safe for modes with NaNs, since 0.0 / 0.0 will then be
2979 NaN rather than 0.0. Nor is it safe for modes with signed
2980 zeros, since dividing 0 by a negative number gives -0.0. */
2981 if (trueop0 == CONST0_RTX (mode)
2982 && !HONOR_NANS (mode)
2983 && !HONOR_SIGNED_ZEROS (mode)
2984 && ! side_effects_p (op1))
2985 return op0;
2986 /* x/1.0 is x. */
2987 if (trueop1 == CONST1_RTX (mode)
2988 && !HONOR_SNANS (mode))
2989 return op0;
2991 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2992 && trueop1 != CONST0_RTX (mode))
2994 REAL_VALUE_TYPE d;
2995 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2997 /* x/-1.0 is -x. */
2998 if (REAL_VALUES_EQUAL (d, dconstm1)
2999 && !HONOR_SNANS (mode))
3000 return simplify_gen_unary (NEG, mode, op0, mode);
3002 /* Change FP division by a constant into multiplication.
3003 Only do this with -freciprocal-math. */
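/* For example, with -freciprocal-math, (div x (const_double 4.0))
   becomes (mult x (const_double 0.25)).  */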
3004 if (flag_reciprocal_math
3005 && !REAL_VALUES_EQUAL (d, dconst0))
3007 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3008 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3009 return simplify_gen_binary (MULT, mode, op0, tem);
3013 else if (SCALAR_INT_MODE_P (mode))
3015 /* 0/x is 0 (or x&0 if x has side-effects). */
3016 if (trueop0 == CONST0_RTX (mode)
3017 && !cfun->can_throw_non_call_exceptions)
3019 if (side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode, op1, trueop0);
3021 return trueop0;
3023 /* x/1 is x. */
3024 if (trueop1 == CONST1_RTX (mode))
3026 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3027 if (tem)
3028 return tem;
3030 /* x/-1 is -x. */
3031 if (trueop1 == constm1_rtx)
3033 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3034 if (x)
3035 return simplify_gen_unary (NEG, mode, x, mode);
3038 break;
3040 case UMOD:
3041 /* 0%x is 0 (or x&0 if x has side-effects). */
3042 if (trueop0 == CONST0_RTX (mode))
3044 if (side_effects_p (op1))
3045 return simplify_gen_binary (AND, mode, op1, trueop0);
3046 return trueop0;
3048 /* x%1 is 0 (or x&0 if x has side-effects). */
3049 if (trueop1 == CONST1_RTX (mode))
3051 if (side_effects_p (op0))
3052 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3053 return CONST0_RTX (mode);
3055 /* Implement modulus by power of two as AND. */
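/* For example, (umod x (const_int 8)) becomes
   (and x (const_int 7)).  */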
3056 if (CONST_INT_P (trueop1)
3057 && exact_log2 (UINTVAL (trueop1)) > 0)
3058 return simplify_gen_binary (AND, mode, op0,
3059 gen_int_mode (INTVAL (op1) - 1, mode));
3060 break;
3062 case MOD:
3063 /* 0%x is 0 (or x&0 if x has side-effects). */
3064 if (trueop0 == CONST0_RTX (mode))
3066 if (side_effects_p (op1))
3067 return simplify_gen_binary (AND, mode, op1, trueop0);
3068 return trueop0;
3070 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3071 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3073 if (side_effects_p (op0))
3074 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3075 return CONST0_RTX (mode);
3077 break;
3079 case ROTATERT:
3080 case ROTATE:
3081 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3082 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3083 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3084 amount instead. */
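/* For example, when the target provides both rotate patterns, in SImode
   (rotate x (const_int 24)) is canonicalized to
   (rotatert x (const_int 8)).  */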
3085 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3086 if (CONST_INT_P (trueop1)
3087 && IN_RANGE (INTVAL (trueop1),
3088 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3089 GET_MODE_PRECISION (mode) - 1))
3090 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3091 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3092 - INTVAL (trueop1)));
3093 #endif
3094 /* FALLTHRU */
3095 case ASHIFTRT:
3096 if (trueop1 == CONST0_RTX (mode))
3097 return op0;
3098 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3099 return op0;
3100 /* Rotating ~0 always results in ~0. */
3101 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3102 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3103 && ! side_effects_p (op1))
3104 return op0;
3105 canonicalize_shift:
3106 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3108 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3109 if (val != INTVAL (op1))
3110 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3112 break;
3114 case ASHIFT:
3115 case SS_ASHIFT:
3116 case US_ASHIFT:
3117 if (trueop1 == CONST0_RTX (mode))
3118 return op0;
3119 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3120 return op0;
3121 goto canonicalize_shift;
3123 case LSHIFTRT:
3124 if (trueop1 == CONST0_RTX (mode))
3125 return op0;
3126 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3127 return op0;
3128 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
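/* For example, when CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode and
   STORE_FLAG_VALUE is 1, (lshiftrt (clz:SI x) (const_int 5)) becomes
   (eq x (const_int 0)); the shifted value is 1 exactly when x is zero.  */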
3129 if (GET_CODE (op0) == CLZ
3130 && CONST_INT_P (trueop1)
3131 && STORE_FLAG_VALUE == 1
3132 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3134 machine_mode imode = GET_MODE (XEXP (op0, 0));
3135 unsigned HOST_WIDE_INT zero_val = 0;
3137 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3138 && zero_val == GET_MODE_PRECISION (imode)
3139 && INTVAL (trueop1) == exact_log2 (zero_val))
3140 return simplify_gen_relational (EQ, mode, imode,
3141 XEXP (op0, 0), const0_rtx);
3143 goto canonicalize_shift;
3145 case SMIN:
3146 if (width <= HOST_BITS_PER_WIDE_INT
3147 && mode_signbit_p (mode, trueop1)
3148 && ! side_effects_p (op0))
3149 return op1;
3150 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3151 return op0;
3152 tem = simplify_associative_operation (code, mode, op0, op1);
3153 if (tem)
3154 return tem;
3155 break;
3157 case SMAX:
3158 if (width <= HOST_BITS_PER_WIDE_INT
3159 && CONST_INT_P (trueop1)
3160 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3161 && ! side_effects_p (op0))
3162 return op1;
3163 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3164 return op0;
3165 tem = simplify_associative_operation (code, mode, op0, op1);
3166 if (tem)
3167 return tem;
3168 break;
3170 case UMIN:
3171 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3172 return op1;
3173 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3174 return op0;
3175 tem = simplify_associative_operation (code, mode, op0, op1);
3176 if (tem)
3177 return tem;
3178 break;
3180 case UMAX:
3181 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3182 return op1;
3183 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3184 return op0;
3185 tem = simplify_associative_operation (code, mode, op0, op1);
3186 if (tem)
3187 return tem;
3188 break;
3190 case SS_PLUS:
3191 case US_PLUS:
3192 case SS_MINUS:
3193 case US_MINUS:
3194 case SS_MULT:
3195 case US_MULT:
3196 case SS_DIV:
3197 case US_DIV:
3198 /* ??? There are simplifications that can be done. */
3199 return 0;
3201 case VEC_SELECT:
3202 if (!VECTOR_MODE_P (mode))
3204 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3205 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3206 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3207 gcc_assert (XVECLEN (trueop1, 0) == 1);
3208 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3210 if (GET_CODE (trueop0) == CONST_VECTOR)
3211 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3212 (trueop1, 0, 0)));
3214 /* Extract a scalar element from a nested VEC_SELECT expression
3215 (with optional nested VEC_CONCAT expression). Some targets
3216 (i386) extract a scalar element from a vector using a chain of
3217 nested VEC_SELECT expressions. When the input operand is a
3218 memory operand, this operation can be simplified to a simple
3219 scalar load from an offset memory address. */
3220 if (GET_CODE (trueop0) == VEC_SELECT)
3222 rtx op0 = XEXP (trueop0, 0);
3223 rtx op1 = XEXP (trueop0, 1);
3225 machine_mode opmode = GET_MODE (op0);
3226 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3227 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3229 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3230 int elem;
3232 rtvec vec;
3233 rtx tmp_op, tmp;
3235 gcc_assert (GET_CODE (op1) == PARALLEL);
3236 gcc_assert (i < n_elts);
3238 /* Select the element pointed to by the nested selector. */
3239 elem = INTVAL (XVECEXP (op1, 0, i));
3241 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3242 if (GET_CODE (op0) == VEC_CONCAT)
3244 rtx op00 = XEXP (op0, 0);
3245 rtx op01 = XEXP (op0, 1);
3247 machine_mode mode00, mode01;
3248 int n_elts00, n_elts01;
3250 mode00 = GET_MODE (op00);
3251 mode01 = GET_MODE (op01);
3253 /* Find out number of elements of each operand. */
3254 if (VECTOR_MODE_P (mode00))
3256 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3257 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3259 else
3260 n_elts00 = 1;
3262 if (VECTOR_MODE_P (mode01))
3264 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3265 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3267 else
3268 n_elts01 = 1;
3270 gcc_assert (n_elts == n_elts00 + n_elts01);
3272 /* Select correct operand of VEC_CONCAT
3273 and adjust selector. */
3274 if (elem < n_elts01)
3275 tmp_op = op00;
3276 else
3278 tmp_op = op01;
3279 elem -= n_elts00;
3282 else
3283 tmp_op = op0;
3285 vec = rtvec_alloc (1);
3286 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3288 tmp = gen_rtx_fmt_ee (code, mode,
3289 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3290 return tmp;
3292 if (GET_CODE (trueop0) == VEC_DUPLICATE
3293 && GET_MODE (XEXP (trueop0, 0)) == mode)
3294 return XEXP (trueop0, 0);
3296 else
3298 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3299 gcc_assert (GET_MODE_INNER (mode)
3300 == GET_MODE_INNER (GET_MODE (trueop0)));
3301 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3303 if (GET_CODE (trueop0) == CONST_VECTOR)
3305 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3306 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3307 rtvec v = rtvec_alloc (n_elts);
3308 unsigned int i;
3310 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3311 for (i = 0; i < n_elts; i++)
3313 rtx x = XVECEXP (trueop1, 0, i);
3315 gcc_assert (CONST_INT_P (x));
3316 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3317 INTVAL (x));
3320 return gen_rtx_CONST_VECTOR (mode, v);
3323 /* Recognize the identity. */
3324 if (GET_MODE (trueop0) == mode)
3326 bool maybe_ident = true;
3327 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3329 rtx j = XVECEXP (trueop1, 0, i);
3330 if (!CONST_INT_P (j) || INTVAL (j) != i)
3332 maybe_ident = false;
3333 break;
3336 if (maybe_ident)
3337 return trueop0;
3340 /* If we build {a,b} then permute it, build the result directly. */
3341 if (XVECLEN (trueop1, 0) == 2
3342 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3343 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3344 && GET_CODE (trueop0) == VEC_CONCAT
3345 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3346 && GET_MODE (XEXP (trueop0, 0)) == mode
3347 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3348 && GET_MODE (XEXP (trueop0, 1)) == mode)
3350 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3351 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3352 rtx subop0, subop1;
3354 gcc_assert (i0 < 4 && i1 < 4);
3355 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3356 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3358 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3361 if (XVECLEN (trueop1, 0) == 2
3362 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3363 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3364 && GET_CODE (trueop0) == VEC_CONCAT
3365 && GET_MODE (trueop0) == mode)
3367 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3368 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3369 rtx subop0, subop1;
3371 gcc_assert (i0 < 2 && i1 < 2);
3372 subop0 = XEXP (trueop0, i0);
3373 subop1 = XEXP (trueop0, i1);
3375 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3378 /* If we select one half of a vec_concat, return that. */
3379 if (GET_CODE (trueop0) == VEC_CONCAT
3380 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3382 rtx subop0 = XEXP (trueop0, 0);
3383 rtx subop1 = XEXP (trueop0, 1);
3384 machine_mode mode0 = GET_MODE (subop0);
3385 machine_mode mode1 = GET_MODE (subop1);
3386 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3387 int l0 = GET_MODE_SIZE (mode0) / li;
3388 int l1 = GET_MODE_SIZE (mode1) / li;
3389 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3390 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3392 bool success = true;
3393 for (int i = 1; i < l0; ++i)
3395 rtx j = XVECEXP (trueop1, 0, i);
3396 if (!CONST_INT_P (j) || INTVAL (j) != i)
3398 success = false;
3399 break;
3402 if (success)
3403 return subop0;
3405 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3407 bool success = true;
3408 for (int i = 1; i < l1; ++i)
3410 rtx j = XVECEXP (trueop1, 0, i);
3411 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3413 success = false;
3414 break;
3417 if (success)
3418 return subop1;
3423 if (XVECLEN (trueop1, 0) == 1
3424 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3425 && GET_CODE (trueop0) == VEC_CONCAT)
3427 rtx vec = trueop0;
3428 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3430 /* Try to find the element in the VEC_CONCAT. */
3431 while (GET_MODE (vec) != mode
3432 && GET_CODE (vec) == VEC_CONCAT)
3434 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3435 if (offset < vec_size)
3436 vec = XEXP (vec, 0);
3437 else
3439 offset -= vec_size;
3440 vec = XEXP (vec, 1);
3442 vec = avoid_constant_pool_reference (vec);
3445 if (GET_MODE (vec) == mode)
3446 return vec;
3449 /* If we select elements in a vec_merge that all come from the same
3450 operand, select from that operand directly. */
3451 if (GET_CODE (op0) == VEC_MERGE)
3453 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3454 if (CONST_INT_P (trueop02))
3456 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3457 bool all_operand0 = true;
3458 bool all_operand1 = true;
3459 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3461 rtx j = XVECEXP (trueop1, 0, i);
3462 if (sel & (1 << UINTVAL (j)))
3463 all_operand1 = false;
3464 else
3465 all_operand0 = false;
3467 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3468 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3469 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3470 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3474 /* If we have two nested selects that are inverses of each
3475 other, replace them with the source operand. */
3476 if (GET_CODE (trueop0) == VEC_SELECT
3477 && GET_MODE (XEXP (trueop0, 0)) == mode)
3479 rtx op0_subop1 = XEXP (trueop0, 1);
3480 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3481 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3483 /* Apply the outer ordering vector to the inner one. (The inner
3484 ordering vector is expressly permitted to be of a different
3485 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3486 then the two VEC_SELECTs cancel. */
3487 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3489 rtx x = XVECEXP (trueop1, 0, i);
3490 if (!CONST_INT_P (x))
3491 return 0;
3492 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3493 if (!CONST_INT_P (y) || i != INTVAL (y))
3494 return 0;
3496 return XEXP (trueop0, 0);
3499 return 0;
3500 case VEC_CONCAT:
3502 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3503 ? GET_MODE (trueop0)
3504 : GET_MODE_INNER (mode));
3505 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3506 ? GET_MODE (trueop1)
3507 : GET_MODE_INNER (mode));
3509 gcc_assert (VECTOR_MODE_P (mode));
3510 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3511 == GET_MODE_SIZE (mode));
3513 if (VECTOR_MODE_P (op0_mode))
3514 gcc_assert (GET_MODE_INNER (mode)
3515 == GET_MODE_INNER (op0_mode));
3516 else
3517 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3519 if (VECTOR_MODE_P (op1_mode))
3520 gcc_assert (GET_MODE_INNER (mode)
3521 == GET_MODE_INNER (op1_mode));
3522 else
3523 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3525 if ((GET_CODE (trueop0) == CONST_VECTOR
3526 || CONST_SCALAR_INT_P (trueop0)
3527 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3528 && (GET_CODE (trueop1) == CONST_VECTOR
3529 || CONST_SCALAR_INT_P (trueop1)
3530 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3532 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3533 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3534 rtvec v = rtvec_alloc (n_elts);
3535 unsigned int i;
3536 unsigned in_n_elts = 1;
3538 if (VECTOR_MODE_P (op0_mode))
3539 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3540 for (i = 0; i < n_elts; i++)
3542 if (i < in_n_elts)
3544 if (!VECTOR_MODE_P (op0_mode))
3545 RTVEC_ELT (v, i) = trueop0;
3546 else
3547 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3549 else
3551 if (!VECTOR_MODE_P (op1_mode))
3552 RTVEC_ELT (v, i) = trueop1;
3553 else
3554 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3555 i - in_n_elts);
3559 return gen_rtx_CONST_VECTOR (mode, v);
3562 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3563 Restrict the transformation to avoid generating a VEC_SELECT with a
3564 mode unrelated to its operand. */
3565 if (GET_CODE (trueop0) == VEC_SELECT
3566 && GET_CODE (trueop1) == VEC_SELECT
3567 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3568 && GET_MODE (XEXP (trueop0, 0)) == mode)
3570 rtx par0 = XEXP (trueop0, 1);
3571 rtx par1 = XEXP (trueop1, 1);
3572 int len0 = XVECLEN (par0, 0);
3573 int len1 = XVECLEN (par1, 0);
3574 rtvec vec = rtvec_alloc (len0 + len1);
3575 for (int i = 0; i < len0; i++)
3576 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3577 for (int i = 0; i < len1; i++)
3578 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3579 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3580 gen_rtx_PARALLEL (VOIDmode, vec));
3583 return 0;
3585 default:
3586 gcc_unreachable ();
3589 return 0;
3593 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3594 rtx op0, rtx op1)
3596 unsigned int width = GET_MODE_PRECISION (mode);
3598 if (VECTOR_MODE_P (mode)
3599 && code != VEC_CONCAT
3600 && GET_CODE (op0) == CONST_VECTOR
3601 && GET_CODE (op1) == CONST_VECTOR)
3603 unsigned n_elts = GET_MODE_NUNITS (mode);
3604 machine_mode op0mode = GET_MODE (op0);
3605 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3606 machine_mode op1mode = GET_MODE (op1);
3607 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3608 rtvec v = rtvec_alloc (n_elts);
3609 unsigned int i;
3611 gcc_assert (op0_n_elts == n_elts);
3612 gcc_assert (op1_n_elts == n_elts);
3613 for (i = 0; i < n_elts; i++)
3615 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3616 CONST_VECTOR_ELT (op0, i),
3617 CONST_VECTOR_ELT (op1, i));
3618 if (!x)
3619 return 0;
3620 RTVEC_ELT (v, i) = x;
3623 return gen_rtx_CONST_VECTOR (mode, v);
3626 if (VECTOR_MODE_P (mode)
3627 && code == VEC_CONCAT
3628 && (CONST_SCALAR_INT_P (op0)
3629 || GET_CODE (op0) == CONST_FIXED
3630 || CONST_DOUBLE_AS_FLOAT_P (op0))
3631 && (CONST_SCALAR_INT_P (op1)
3632 || CONST_DOUBLE_AS_FLOAT_P (op1)
3633 || GET_CODE (op1) == CONST_FIXED))
3635 unsigned n_elts = GET_MODE_NUNITS (mode);
3636 rtvec v = rtvec_alloc (n_elts);
3638 gcc_assert (n_elts >= 2);
3639 if (n_elts == 2)
3641 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3642 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3644 RTVEC_ELT (v, 0) = op0;
3645 RTVEC_ELT (v, 1) = op1;
3647 else
3649 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3650 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3651 unsigned i;
3653 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3654 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3655 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3657 for (i = 0; i < op0_n_elts; ++i)
3658 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3659 for (i = 0; i < op1_n_elts; ++i)
3660 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3663 return gen_rtx_CONST_VECTOR (mode, v);
3666 if (SCALAR_FLOAT_MODE_P (mode)
3667 && CONST_DOUBLE_AS_FLOAT_P (op0)
3668 && CONST_DOUBLE_AS_FLOAT_P (op1)
3669 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3671 if (code == AND
3672 || code == IOR
3673 || code == XOR)
3675 long tmp0[4];
3676 long tmp1[4];
3677 REAL_VALUE_TYPE r;
3678 int i;
3680 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3681 GET_MODE (op0));
3682 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3683 GET_MODE (op1));
3684 for (i = 0; i < 4; i++)
3686 switch (code)
3688 case AND:
3689 tmp0[i] &= tmp1[i];
3690 break;
3691 case IOR:
3692 tmp0[i] |= tmp1[i];
3693 break;
3694 case XOR:
3695 tmp0[i] ^= tmp1[i];
3696 break;
3697 default:
3698 gcc_unreachable ();
3701 real_from_target (&r, tmp0, mode);
3702 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3704 else
3706 REAL_VALUE_TYPE f0, f1, value, result;
3707 bool inexact;
3709 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3710 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3711 real_convert (&f0, mode, &f0);
3712 real_convert (&f1, mode, &f1);
3714 if (HONOR_SNANS (mode)
3715 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3716 return 0;
3718 if (code == DIV
3719 && REAL_VALUES_EQUAL (f1, dconst0)
3720 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3721 return 0;
3723 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3724 && flag_trapping_math
3725 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3727 int s0 = REAL_VALUE_NEGATIVE (f0);
3728 int s1 = REAL_VALUE_NEGATIVE (f1);
3730 switch (code)
3732 case PLUS:
3733 /* Inf + -Inf = NaN plus exception. */
3734 if (s0 != s1)
3735 return 0;
3736 break;
3737 case MINUS:
3738 /* Inf - Inf = NaN plus exception. */
3739 if (s0 == s1)
3740 return 0;
3741 break;
3742 case DIV:
3743 /* Inf / Inf = NaN plus exception. */
3744 return 0;
3745 default:
3746 break;
3750 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3751 && flag_trapping_math
3752 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3753 || (REAL_VALUE_ISINF (f1)
3754 && REAL_VALUES_EQUAL (f0, dconst0))))
3755 /* Inf * 0 = NaN plus exception. */
3756 return 0;
3758 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3759 &f0, &f1);
3760 real_convert (&result, mode, &value);
3762 /* Don't constant fold this floating point operation if
3763 the result has overflowed and flag_trapping_math is set. */
3765 if (flag_trapping_math
3766 && MODE_HAS_INFINITIES (mode)
3767 && REAL_VALUE_ISINF (result)
3768 && !REAL_VALUE_ISINF (f0)
3769 && !REAL_VALUE_ISINF (f1))
3770 /* Overflow plus exception. */
3771 return 0;
3773 /* Don't constant fold this floating point operation if the
3774 result may depend upon the run-time rounding mode and
3775 flag_rounding_math is set, or if GCC's software emulation
3776 is unable to accurately represent the result. */
3778 if ((flag_rounding_math
3779 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3780 && (inexact || !real_identical (&result, &value)))
3781 return NULL_RTX;
3783 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3787 /* We can fold some multi-word operations. */
3788 if ((GET_MODE_CLASS (mode) == MODE_INT
3789 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3790 && CONST_SCALAR_INT_P (op0)
3791 && CONST_SCALAR_INT_P (op1))
3793 wide_int result;
3794 bool overflow;
3795 rtx_mode_t pop0 = std::make_pair (op0, mode);
3796 rtx_mode_t pop1 = std::make_pair (op1, mode);
3798 #if TARGET_SUPPORTS_WIDE_INT == 0
3799 /* This assert keeps the simplification from producing a result
3800 that cannot be represented in a CONST_DOUBLE, but a lot of
3801 upstream callers expect that this function never fails to
3802 simplify something, so if you added this condition to the test
3803 above, the code would die later anyway. If this assert
3804 fires, you just need to make the port support wide int. */
3805 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3806 #endif
3807 switch (code)
3809 case MINUS:
3810 result = wi::sub (pop0, pop1);
3811 break;
3813 case PLUS:
3814 result = wi::add (pop0, pop1);
3815 break;
3817 case MULT:
3818 result = wi::mul (pop0, pop1);
3819 break;
3821 case DIV:
3822 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3823 if (overflow)
3824 return NULL_RTX;
3825 break;
3827 case MOD:
3828 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3829 if (overflow)
3830 return NULL_RTX;
3831 break;
3833 case UDIV:
3834 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3835 if (overflow)
3836 return NULL_RTX;
3837 break;
3839 case UMOD:
3840 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3841 if (overflow)
3842 return NULL_RTX;
3843 break;
3845 case AND:
3846 result = wi::bit_and (pop0, pop1);
3847 break;
3849 case IOR:
3850 result = wi::bit_or (pop0, pop1);
3851 break;
3853 case XOR:
3854 result = wi::bit_xor (pop0, pop1);
3855 break;
3857 case SMIN:
3858 result = wi::smin (pop0, pop1);
3859 break;
3861 case SMAX:
3862 result = wi::smax (pop0, pop1);
3863 break;
3865 case UMIN:
3866 result = wi::umin (pop0, pop1);
3867 break;
3869 case UMAX:
3870 result = wi::umax (pop0, pop1);
3871 break;
3873 case LSHIFTRT:
3874 case ASHIFTRT:
3875 case ASHIFT:
3877 wide_int wop1 = pop1;
3878 if (SHIFT_COUNT_TRUNCATED)
3879 wop1 = wi::umod_trunc (wop1, width);
3880 else if (wi::geu_p (wop1, width))
3881 return NULL_RTX;
3883 switch (code)
3885 case LSHIFTRT:
3886 result = wi::lrshift (pop0, wop1);
3887 break;
3889 case ASHIFTRT:
3890 result = wi::arshift (pop0, wop1);
3891 break;
3893 case ASHIFT:
3894 result = wi::lshift (pop0, wop1);
3895 break;
3897 default:
3898 gcc_unreachable ();
3900 break;
3902 case ROTATE:
3903 case ROTATERT:
3905 if (wi::neg_p (pop1))
3906 return NULL_RTX;
3908 switch (code)
3910 case ROTATE:
3911 result = wi::lrotate (pop0, pop1);
3912 break;
3914 case ROTATERT:
3915 result = wi::rrotate (pop0, pop1);
3916 break;
3918 default:
3919 gcc_unreachable ();
3921 break;
3923 default:
3924 return NULL_RTX;
3926 return immed_wide_int_const (result, mode);
3929 return NULL_RTX;
3934 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3935 PLUS or MINUS.
3937 Rather than test for specific cases, we do this by a brute-force method
3938 and do all possible simplifications until no more changes occur. Then
3939 we rebuild the operation. */
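/* Illustrative standalone sketch, not part of simplify-rtx.c: the
   toy_* names below are invented for this example.  It shows the same
   flattening idea on a toy integer expression tree -- PLUS, MINUS and
   NEG nodes are expanded into a flat list of (value, negated) pairs
   whose signed sum equals the original expression, mirroring the
   ops[] array that simplify_plus_minus builds below.  */

#include <stdio.h>

enum toy_code { TOY_CONST, TOY_PLUS, TOY_MINUS, TOY_NEG };

struct toy_node
{
  enum toy_code code;
  int value;                    /* Used when code == TOY_CONST.  */
  const struct toy_node *op0, *op1;
};

struct toy_op { int value; int neg; };

/* Append the leaves of NODE to OPS, remembering whether each leaf is
   negated; NEG says whether NODE itself appears negated.  Returns the
   new number of entries.  */
static int
toy_flatten (const struct toy_node *node, int neg,
             struct toy_op *ops, int n_ops)
{
  switch (node->code)
    {
    case TOY_CONST:
      ops[n_ops].value = node->value;
      ops[n_ops].neg = neg;
      return n_ops + 1;
    case TOY_NEG:
      return toy_flatten (node->op0, !neg, ops, n_ops);
    case TOY_PLUS:
      n_ops = toy_flatten (node->op0, neg, ops, n_ops);
      return toy_flatten (node->op1, neg, ops, n_ops);
    case TOY_MINUS:
      n_ops = toy_flatten (node->op0, neg, ops, n_ops);
      return toy_flatten (node->op1, !neg, ops, n_ops);
    }
  return n_ops;
}

int
main (void)
{
  /* 3 - (7 + 5) flattens to (3, +), (7, -), (5, -) and sums to -9.  */
  struct toy_node a = { TOY_CONST, 7, 0, 0 };
  struct toy_node b = { TOY_CONST, 5, 0, 0 };
  struct toy_node c = { TOY_CONST, 3, 0, 0 };
  struct toy_node sum = { TOY_PLUS, 0, &a, &b };
  struct toy_node expr = { TOY_MINUS, 0, &c, &sum };
  struct toy_op ops[16];
  int n = toy_flatten (&expr, 0, ops, 0), total = 0;

  for (int i = 0; i < n; i++)
    total += ops[i].neg ? -ops[i].value : ops[i].value;
  printf ("%d operands, value %d\n", n, total);
  return 0;
}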
3941 struct simplify_plus_minus_op_data
3943 rtx op;
3944 short neg;
3947 static bool
3948 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3950 int result;
3952 result = (commutative_operand_precedence (y)
3953 - commutative_operand_precedence (x));
3954 if (result)
3955 return result > 0;
3957 /* Group together equal REGs to do more simplification. */
3958 if (REG_P (x) && REG_P (y))
3959 return REGNO (x) > REGNO (y);
3960 else
3961 return false;
3964 static rtx
3965 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
3966 rtx op1)
3968 struct simplify_plus_minus_op_data ops[16];
3969 rtx result, tem;
3970 int n_ops = 2;
3971 int changed, n_constants, canonicalized = 0;
3972 int i, j;
3974 memset (ops, 0, sizeof ops);
3976 /* Set up the two operands and then expand them until nothing has been
3977 changed. If we run out of room in our array, give up; this should
3978 almost never happen. */
3980 ops[0].op = op0;
3981 ops[0].neg = 0;
3982 ops[1].op = op1;
3983 ops[1].neg = (code == MINUS);
3987 changed = 0;
3988 n_constants = 0;
3990 for (i = 0; i < n_ops; i++)
3992 rtx this_op = ops[i].op;
3993 int this_neg = ops[i].neg;
3994 enum rtx_code this_code = GET_CODE (this_op);
3996 switch (this_code)
3998 case PLUS:
3999 case MINUS:
4000 if (n_ops == ARRAY_SIZE (ops))
4001 return NULL_RTX;
4003 ops[n_ops].op = XEXP (this_op, 1);
4004 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4005 n_ops++;
4007 ops[i].op = XEXP (this_op, 0);
4008 changed = 1;
4009 canonicalized |= this_neg || i != n_ops - 2;
4010 break;
4012 case NEG:
4013 ops[i].op = XEXP (this_op, 0);
4014 ops[i].neg = ! this_neg;
4015 changed = 1;
4016 canonicalized = 1;
4017 break;
4019 case CONST:
4020 if (n_ops != ARRAY_SIZE (ops)
4021 && GET_CODE (XEXP (this_op, 0)) == PLUS
4022 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4023 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4025 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4026 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4027 ops[n_ops].neg = this_neg;
4028 n_ops++;
4029 changed = 1;
4030 canonicalized = 1;
4032 break;
4034 case NOT:
4035 /* ~a -> (-a - 1) */
4036 if (n_ops != ARRAY_SIZE (ops))
4038 ops[n_ops].op = CONSTM1_RTX (mode);
4039 ops[n_ops++].neg = this_neg;
4040 ops[i].op = XEXP (this_op, 0);
4041 ops[i].neg = !this_neg;
4042 changed = 1;
4043 canonicalized = 1;
4045 break;
4047 case CONST_INT:
4048 n_constants++;
4049 if (this_neg)
4051 ops[i].op = neg_const_int (mode, this_op);
4052 ops[i].neg = 0;
4053 changed = 1;
4054 canonicalized = 1;
4056 break;
4058 default:
4059 break;
4063 while (changed);
4065 if (n_constants > 1)
4066 canonicalized = 1;
4068 gcc_assert (n_ops >= 2);
4070 /* If we only have two operands, we can avoid the loops. */
4071 if (n_ops == 2)
4073 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4074 rtx lhs, rhs;
4076 /* Get the two operands. Be careful with the order, especially for
4077 the cases where code == MINUS. */
4078 if (ops[0].neg && ops[1].neg)
4080 lhs = gen_rtx_NEG (mode, ops[0].op);
4081 rhs = ops[1].op;
4083 else if (ops[0].neg)
4085 lhs = ops[1].op;
4086 rhs = ops[0].op;
4088 else
4090 lhs = ops[0].op;
4091 rhs = ops[1].op;
4094 return simplify_const_binary_operation (code, mode, lhs, rhs);
4097 /* Now simplify each pair of operands until nothing changes. */
4100 /* Insertion sort is good enough for a small array. */
4101 for (i = 1; i < n_ops; i++)
4103 struct simplify_plus_minus_op_data save;
4104 j = i - 1;
4105 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4106 continue;
4108 canonicalized = 1;
4109 save = ops[i];
4111 ops[j + 1] = ops[j];
4112 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4113 ops[j + 1] = save;
4116 changed = 0;
4117 for (i = n_ops - 1; i > 0; i--)
4118 for (j = i - 1; j >= 0; j--)
4120 rtx lhs = ops[j].op, rhs = ops[i].op;
4121 int lneg = ops[j].neg, rneg = ops[i].neg;
4123 if (lhs != 0 && rhs != 0)
4125 enum rtx_code ncode = PLUS;
4127 if (lneg != rneg)
4129 ncode = MINUS;
4130 if (lneg)
4131 tem = lhs, lhs = rhs, rhs = tem;
4133 else if (swap_commutative_operands_p (lhs, rhs))
4134 tem = lhs, lhs = rhs, rhs = tem;
4136 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4137 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4139 rtx tem_lhs, tem_rhs;
4141 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4142 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4143 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4145 if (tem && !CONSTANT_P (tem))
4146 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4148 else
4149 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4151 if (tem)
4153 /* Reject "simplifications" that just wrap the two
4154 arguments in a CONST. Failure to do so can result
4155 in infinite recursion with simplify_binary_operation
4156 when it calls us to simplify CONST operations.
4157 Also, if we find such a simplification, don't try
4158 any more combinations with this rhs: We must have
4159 something like symbol+offset, i.e. one of the
4160 trivial CONST expressions we handle later. */
4161 if (GET_CODE (tem) == CONST
4162 && GET_CODE (XEXP (tem, 0)) == ncode
4163 && XEXP (XEXP (tem, 0), 0) == lhs
4164 && XEXP (XEXP (tem, 0), 1) == rhs)
4165 break;
4166 lneg &= rneg;
4167 if (GET_CODE (tem) == NEG)
4168 tem = XEXP (tem, 0), lneg = !lneg;
4169 if (CONST_INT_P (tem) && lneg)
4170 tem = neg_const_int (mode, tem), lneg = 0;
4172 ops[i].op = tem;
4173 ops[i].neg = lneg;
4174 ops[j].op = NULL_RTX;
4175 changed = 1;
4176 canonicalized = 1;
4181 /* If nothing changed, fail. */
4182 if (!canonicalized)
4183 return NULL_RTX;
4185 /* Pack all the operands to the lower-numbered entries. */
4186 for (i = 0, j = 0; j < n_ops; j++)
4187 if (ops[j].op)
4189 ops[i] = ops[j];
4190 i++;
4192 n_ops = i;
4194 while (changed);
4196 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4197 if (n_ops == 2
4198 && CONST_INT_P (ops[1].op)
4199 && CONSTANT_P (ops[0].op)
4200 && ops[0].neg)
4201 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4203 /* We suppressed creation of trivial CONST expressions in the
4204 combination loop to avoid recursion. Create one manually now.
4205 The combination loop should have ensured that there is exactly
4206 one CONST_INT, and the sort will have ensured that it is last
4207 in the array and that any other constant will be next-to-last. */
4209 if (n_ops > 1
4210 && CONST_INT_P (ops[n_ops - 1].op)
4211 && CONSTANT_P (ops[n_ops - 2].op))
4213 rtx value = ops[n_ops - 1].op;
4214 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4215 value = neg_const_int (mode, value);
4216 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4217 INTVAL (value));
4218 n_ops--;
4221 /* Put a non-negated operand first, if possible. */
4223 for (i = 0; i < n_ops && ops[i].neg; i++)
4224 continue;
4225 if (i == n_ops)
4226 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4227 else if (i != 0)
4229 tem = ops[0].op;
4230 ops[0] = ops[i];
4231 ops[i].op = tem;
4232 ops[i].neg = 1;
4235 /* Now make the result by performing the requested operations. */
4236 result = ops[0].op;
4237 for (i = 1; i < n_ops; i++)
4238 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4239 mode, result, ops[i].op);
4241 return result;
4244 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4245 static bool
4246 plus_minus_operand_p (const_rtx x)
4248 return GET_CODE (x) == PLUS
4249 || GET_CODE (x) == MINUS
4250 || (GET_CODE (x) == CONST
4251 && GET_CODE (XEXP (x, 0)) == PLUS
4252 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4253 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4256 /* Like simplify_binary_operation except used for relational operators.
4257 MODE is the mode of the result. If MODE is VOIDmode, the operands
4258 must not both be VOIDmode as well.
4260 CMP_MODE specifies the mode in which the comparison is done, so it is
4261 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4262 the operands or, if both are VOIDmode, the operands are compared in
4263 "infinite precision". */
4265 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4266 machine_mode cmp_mode, rtx op0, rtx op1)
4268 rtx tem, trueop0, trueop1;
4270 if (cmp_mode == VOIDmode)
4271 cmp_mode = GET_MODE (op0);
4272 if (cmp_mode == VOIDmode)
4273 cmp_mode = GET_MODE (op1);
4275 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4276 if (tem)
4278 if (SCALAR_FLOAT_MODE_P (mode))
4280 if (tem == const0_rtx)
4281 return CONST0_RTX (mode);
4282 #ifdef FLOAT_STORE_FLAG_VALUE
4284 REAL_VALUE_TYPE val;
4285 val = FLOAT_STORE_FLAG_VALUE (mode);
4286 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4288 #else
4289 return NULL_RTX;
4290 #endif
4292 if (VECTOR_MODE_P (mode))
4294 if (tem == const0_rtx)
4295 return CONST0_RTX (mode);
4296 #ifdef VECTOR_STORE_FLAG_VALUE
4298 int i, units;
4299 rtvec v;
4301 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4302 if (val == NULL_RTX)
4303 return NULL_RTX;
4304 if (val == const1_rtx)
4305 return CONST1_RTX (mode);
4307 units = GET_MODE_NUNITS (mode);
4308 v = rtvec_alloc (units);
4309 for (i = 0; i < units; i++)
4310 RTVEC_ELT (v, i) = val;
4311 return gen_rtx_raw_CONST_VECTOR (mode, v);
4313 #else
4314 return NULL_RTX;
4315 #endif
4318 return tem;
4321 /* For the following tests, ensure const0_rtx is op1. */
4322 if (swap_commutative_operands_p (op0, op1)
4323 || (op0 == const0_rtx && op1 != const0_rtx))
4324 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4326 /* If op0 is a compare, extract the comparison arguments from it. */
4327 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4328 return simplify_gen_relational (code, mode, VOIDmode,
4329 XEXP (op0, 0), XEXP (op0, 1));
4331 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4332 || CC0_P (op0))
4333 return NULL_RTX;
4335 trueop0 = avoid_constant_pool_reference (op0);
4336 trueop1 = avoid_constant_pool_reference (op1);
4337 return simplify_relational_operation_1 (code, mode, cmp_mode,
4338 trueop0, trueop1);
4341 /* This part of simplify_relational_operation is only used when CMP_MODE
4342 is not in class MODE_CC (i.e. it is a real comparison).
4344 MODE is the mode of the result, while CMP_MODE is the mode in which
4345 the comparison is done, so it is the mode of the operands. */
4347 static rtx
4348 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4349 machine_mode cmp_mode, rtx op0, rtx op1)
4351 enum rtx_code op0code = GET_CODE (op0);
4353 if (op1 == const0_rtx && COMPARISON_P (op0))
4355 /* If op0 is a comparison, extract the comparison arguments
4356 from it. */
4357 if (code == NE)
4359 if (GET_MODE (op0) == mode)
4360 return simplify_rtx (op0);
4361 else
4362 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4363 XEXP (op0, 0), XEXP (op0, 1));
4365 else if (code == EQ)
4367 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4368 if (new_code != UNKNOWN)
4369 return simplify_gen_relational (new_code, mode, VOIDmode,
4370 XEXP (op0, 0), XEXP (op0, 1));
4374 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4375 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4376 if ((code == LTU || code == GEU)
4377 && GET_CODE (op0) == PLUS
4378 && CONST_INT_P (XEXP (op0, 1))
4379 && (rtx_equal_p (op1, XEXP (op0, 0))
4380 || rtx_equal_p (op1, XEXP (op0, 1)))
4381 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4382 && XEXP (op0, 1) != const0_rtx)
4384 rtx new_cmp
4385 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4386 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4387 cmp_mode, XEXP (op0, 0), new_cmp);
4390 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4391 if ((code == LTU || code == GEU)
4392 && GET_CODE (op0) == PLUS
4393 && rtx_equal_p (op1, XEXP (op0, 1))
4394 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4395 && !rtx_equal_p (op1, XEXP (op0, 0)))
4396 return simplify_gen_relational (code, mode, cmp_mode, op0,
4397 copy_rtx (XEXP (op0, 0)));
4399 if (op1 == const0_rtx)
4401 /* Canonicalize (GTU x 0) as (NE x 0). */
4402 if (code == GTU)
4403 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4404 /* Canonicalize (LEU x 0) as (EQ x 0). */
4405 if (code == LEU)
4406 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4408 else if (op1 == const1_rtx)
4410 switch (code)
4412 case GE:
4413 /* Canonicalize (GE x 1) as (GT x 0). */
4414 return simplify_gen_relational (GT, mode, cmp_mode,
4415 op0, const0_rtx);
4416 case GEU:
4417 /* Canonicalize (GEU x 1) as (NE x 0). */
4418 return simplify_gen_relational (NE, mode, cmp_mode,
4419 op0, const0_rtx);
4420 case LT:
4421 /* Canonicalize (LT x 1) as (LE x 0). */
4422 return simplify_gen_relational (LE, mode, cmp_mode,
4423 op0, const0_rtx);
4424 case LTU:
4425 /* Canonicalize (LTU x 1) as (EQ x 0). */
4426 return simplify_gen_relational (EQ, mode, cmp_mode,
4427 op0, const0_rtx);
4428 default:
4429 break;
4432 else if (op1 == constm1_rtx)
4434 /* Canonicalize (LE x -1) as (LT x 0). */
4435 if (code == LE)
4436 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4437 /* Canonicalize (GT x -1) as (GE x 0). */
4438 if (code == GT)
4439 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4442 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4443 if ((code == EQ || code == NE)
4444 && (op0code == PLUS || op0code == MINUS)
4445 && CONSTANT_P (op1)
4446 && CONSTANT_P (XEXP (op0, 1))
4447 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4449 rtx x = XEXP (op0, 0);
4450 rtx c = XEXP (op0, 1);
4451 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4452 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4454 /* Detect an infinite recursive condition, where we oscillate at this
4455 simplification case between:
4456 A + B == C <---> C - B == A,
4457 where A, B, and C are all constants with non-simplifiable expressions,
4458 usually SYMBOL_REFs. */
4459 if (GET_CODE (tem) == invcode
4460 && CONSTANT_P (x)
4461 && rtx_equal_p (c, XEXP (tem, 1)))
4462 return NULL_RTX;
4464 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4467 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4468 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4469 if (code == NE
4470 && op1 == const0_rtx
4471 && GET_MODE_CLASS (mode) == MODE_INT
4472 && cmp_mode != VOIDmode
4473 /* ??? Work-around BImode bugs in the ia64 backend. */
4474 && mode != BImode
4475 && cmp_mode != BImode
4476 && nonzero_bits (op0, cmp_mode) == 1
4477 && STORE_FLAG_VALUE == 1)
4478 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4479 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4480 : lowpart_subreg (mode, op0, cmp_mode);
4482 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4483 if ((code == EQ || code == NE)
4484 && op1 == const0_rtx
4485 && op0code == XOR)
4486 return simplify_gen_relational (code, mode, cmp_mode,
4487 XEXP (op0, 0), XEXP (op0, 1));
4489 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4490 if ((code == EQ || code == NE)
4491 && op0code == XOR
4492 && rtx_equal_p (XEXP (op0, 0), op1)
4493 && !side_effects_p (XEXP (op0, 0)))
4494 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4495 CONST0_RTX (mode));
4497 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4498 if ((code == EQ || code == NE)
4499 && op0code == XOR
4500 && rtx_equal_p (XEXP (op0, 1), op1)
4501 && !side_effects_p (XEXP (op0, 1)))
4502 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4503 CONST0_RTX (mode));
4505 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4506 if ((code == EQ || code == NE)
4507 && op0code == XOR
4508 && CONST_SCALAR_INT_P (op1)
4509 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4510 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4511 simplify_gen_binary (XOR, cmp_mode,
4512 XEXP (op0, 1), op1));
4514 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4515 if ((code == EQ || code == NE)
4516 && GET_CODE (op0) == BSWAP
4517 && CONST_SCALAR_INT_P (op1))
4518 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4519 simplify_gen_unary (BSWAP, cmp_mode,
4520 op1, cmp_mode));
4522 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4523 if ((code == EQ || code == NE)
4524 && GET_CODE (op0) == BSWAP
4525 && GET_CODE (op1) == BSWAP)
4526 return simplify_gen_relational (code, mode, cmp_mode,
4527 XEXP (op0, 0), XEXP (op1, 0));
4529 if (op0code == POPCOUNT && op1 == const0_rtx)
4530 switch (code)
4532 case EQ:
4533 case LE:
4534 case LEU:
4535 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4536 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4537 XEXP (op0, 0), const0_rtx);
4539 case NE:
4540 case GT:
4541 case GTU:
4542 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4543 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4544 XEXP (op0, 0), const0_rtx);
4546 default:
4547 break;
4550 return NULL_RTX;
4553 enum
4555 CMP_EQ = 1,
4556 CMP_LT = 2,
4557 CMP_GT = 4,
4558 CMP_LTU = 8,
4559 CMP_GTU = 16
4563 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4564 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4565 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4566 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4567 For floating-point comparisons, assume that the operands were ordered. */
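/* Illustrative standalone sketch, not part of simplify-rtx.c (the
   toy_* names are invented for this example).  It computes the same
   kind of flag word for two known integers and then reads a few
   comparison results back out of it, which is what comparison_result
   does for the CMP_* values above.  */

#include <stdio.h>

enum { TOY_CMP_EQ = 1, TOY_CMP_LT = 2, TOY_CMP_GT = 4,
       TOY_CMP_LTU = 8, TOY_CMP_GTU = 16 };

/* Summarize every ordering fact about A and B in one flag word:
   either TOY_CMP_EQ, or one signed flag ORed with one unsigned flag.  */
static int
toy_known_results (long a, long b)
{
  if (a == b)
    return TOY_CMP_EQ;
  return ((a < b) ? TOY_CMP_LT : TOY_CMP_GT)
         | (((unsigned long) a < (unsigned long) b)
            ? TOY_CMP_LTU : TOY_CMP_GTU);
}

int
main (void)
{
  /* -1 vs. 1: "less than" signed, but "greater than" unsigned.  */
  int known = toy_known_results (-1, 1);
  printf ("LT=%d LTU=%d GE=%d\n",
          (known & TOY_CMP_LT) != 0,    /* 1: signed < holds */
          (known & TOY_CMP_LTU) != 0,   /* 0: unsigned < does not */
          (known & TOY_CMP_LT) == 0);   /* 0: GE is the negation of LT */
  return 0;
}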
4569 static rtx
4570 comparison_result (enum rtx_code code, int known_results)
4572 switch (code)
4574 case EQ:
4575 case UNEQ:
4576 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4577 case NE:
4578 case LTGT:
4579 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4581 case LT:
4582 case UNLT:
4583 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4584 case GE:
4585 case UNGE:
4586 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4588 case GT:
4589 case UNGT:
4590 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4591 case LE:
4592 case UNLE:
4593 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4595 case LTU:
4596 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4597 case GEU:
4598 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4600 case GTU:
4601 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4602 case LEU:
4603 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4605 case ORDERED:
4606 return const_true_rtx;
4607 case UNORDERED:
4608 return const0_rtx;
4609 default:
4610 gcc_unreachable ();
4614 /* Check if the given comparison (done in the given MODE) is actually
4615 a tautology or a contradiction. If the mode is VOIDmode, the
4616 comparison is done in "infinite precision". If no simplification
4617 is possible, this function returns zero. Otherwise, it returns
4618 either const_true_rtx or const0_rtx. */
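/* Illustrative standalone sketch, not part of simplify-rtx.c (the
   toy_fold_ge name is invented for this example).  It shows the idea
   behind the "upper and lower bounds" case in the function below:
   when the first operand is only known to lie in [MMIN, MMAX], a
   comparison against a constant can still sometimes be decided.  */

#include <stdio.h>

/* Decide x >= VAL given only that x is in [MMIN, MMAX].
   Returns 1 (always true), 0 (always false) or -1 (undecidable).  */
static int
toy_fold_ge (long mmin, long mmax, long val)
{
  if (val <= mmin)
    return 1;
  if (val > mmax)
    return 0;
  return -1;
}

int
main (void)
{
  /* An unsigned char zero-extended into a wider mode lies in [0, 255].  */
  printf ("%d %d %d\n",
          toy_fold_ge (0, 255, 0),      /* 1: x >= 0 always holds */
          toy_fold_ge (0, 255, 300),    /* 0: x >= 300 never holds */
          toy_fold_ge (0, 255, 100));   /* -1: depends on x */
  return 0;
}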
4621 simplify_const_relational_operation (enum rtx_code code,
4622 machine_mode mode,
4623 rtx op0, rtx op1)
4625 rtx tem;
4626 rtx trueop0;
4627 rtx trueop1;
4629 gcc_assert (mode != VOIDmode
4630 || (GET_MODE (op0) == VOIDmode
4631 && GET_MODE (op1) == VOIDmode));
4633 /* If op0 is a compare, extract the comparison arguments from it. */
4634 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4636 op1 = XEXP (op0, 1);
4637 op0 = XEXP (op0, 0);
4639 if (GET_MODE (op0) != VOIDmode)
4640 mode = GET_MODE (op0);
4641 else if (GET_MODE (op1) != VOIDmode)
4642 mode = GET_MODE (op1);
4643 else
4644 return 0;
4647 /* We can't simplify MODE_CC values since we don't know what the
4648 actual comparison is. */
4649 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4650 return 0;
4652 /* Make sure the constant is second. */
4653 if (swap_commutative_operands_p (op0, op1))
4655 tem = op0, op0 = op1, op1 = tem;
4656 code = swap_condition (code);
4659 trueop0 = avoid_constant_pool_reference (op0);
4660 trueop1 = avoid_constant_pool_reference (op1);
4662 /* For integer comparisons of A and B maybe we can simplify A - B and can
4663 then simplify a comparison of that with zero. If A and B are both either
4664 a register or a CONST_INT, this can't help; testing for these cases will
4665 prevent infinite recursion here and speed things up.
4667 We can only do this for EQ and NE comparisons as otherwise we may
4668 lose or introduce overflow which we cannot disregard as undefined, since
4669 we do not know the signedness of the operation on either the left or
4670 the right hand side of the comparison. */
4672 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4673 && (code == EQ || code == NE)
4674 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4675 && (REG_P (op1) || CONST_INT_P (trueop1)))
4676 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4677 /* We cannot do this if tem is a nonzero address. */
4678 && ! nonzero_address_p (tem))
4679 return simplify_const_relational_operation (signed_condition (code),
4680 mode, tem, const0_rtx);
4682 if (! HONOR_NANS (mode) && code == ORDERED)
4683 return const_true_rtx;
4685 if (! HONOR_NANS (mode) && code == UNORDERED)
4686 return const0_rtx;
4688 /* For modes without NaNs, if the two operands are equal, we know the
4689 result except if they have side-effects. Even with NaNs we know
4690 the result of unordered comparisons and, if signaling NaNs are
4691 irrelevant, also the result of LT/GT/LTGT. */
4692 if ((! HONOR_NANS (GET_MODE (trueop0))
4693 || code == UNEQ || code == UNLE || code == UNGE
4694 || ((code == LT || code == GT || code == LTGT)
4695 && ! HONOR_SNANS (GET_MODE (trueop0))))
4696 && rtx_equal_p (trueop0, trueop1)
4697 && ! side_effects_p (trueop0))
4698 return comparison_result (code, CMP_EQ);
4700 /* If the operands are floating-point constants, see if we can fold
4701 the result. */
4702 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4703 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4704 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4706 REAL_VALUE_TYPE d0, d1;
4708 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4709 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4711 /* Comparisons are unordered iff at least one of the values is NaN. */
4712 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4713 switch (code)
4715 case UNEQ:
4716 case UNLT:
4717 case UNGT:
4718 case UNLE:
4719 case UNGE:
4720 case NE:
4721 case UNORDERED:
4722 return const_true_rtx;
4723 case EQ:
4724 case LT:
4725 case GT:
4726 case LE:
4727 case GE:
4728 case LTGT:
4729 case ORDERED:
4730 return const0_rtx;
4731 default:
4732 return 0;
4735 return comparison_result (code,
4736 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4737 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4740 /* Otherwise, see if the operands are both integers. */
4741 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4742 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4744 /* It would be nice if we really had a mode here. However, the
4745 largest int representable on the target is as good as
4746 infinite. */
4747 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4748 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4749 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4751 if (wi::eq_p (ptrueop0, ptrueop1))
4752 return comparison_result (code, CMP_EQ);
4753 else
4755 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4756 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4757 return comparison_result (code, cr);
4761 /* Optimize comparisons with upper and lower bounds. */
4762 if (HWI_COMPUTABLE_MODE_P (mode)
4763 && CONST_INT_P (trueop1))
4765 int sign;
4766 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4767 HOST_WIDE_INT val = INTVAL (trueop1);
4768 HOST_WIDE_INT mmin, mmax;
4770 if (code == GEU
4771 || code == LEU
4772 || code == GTU
4773 || code == LTU)
4774 sign = 0;
4775 else
4776 sign = 1;
4778 /* Get a reduced range if the sign bit is zero. */
4779 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4781 mmin = 0;
4782 mmax = nonzero;
4784 else
4786 rtx mmin_rtx, mmax_rtx;
4787 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4789 mmin = INTVAL (mmin_rtx);
4790 mmax = INTVAL (mmax_rtx);
4791 if (sign)
4793 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4795 mmin >>= (sign_copies - 1);
4796 mmax >>= (sign_copies - 1);
4800 switch (code)
4802 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4803 case GEU:
4804 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4805 return const_true_rtx;
4806 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4807 return const0_rtx;
4808 break;
4809 case GE:
4810 if (val <= mmin)
4811 return const_true_rtx;
4812 if (val > mmax)
4813 return const0_rtx;
4814 break;
4816 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4817 case LEU:
4818 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4819 return const_true_rtx;
4820 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4821 return const0_rtx;
4822 break;
4823 case LE:
4824 if (val >= mmax)
4825 return const_true_rtx;
4826 if (val < mmin)
4827 return const0_rtx;
4828 break;
4830 case EQ:
4831 /* x == y is always false for y out of range. */
4832 if (val < mmin || val > mmax)
4833 return const0_rtx;
4834 break;
4836 /* x > y is always false for y >= mmax, always true for y < mmin. */
4837 case GTU:
4838 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4839 return const0_rtx;
4840 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4841 return const_true_rtx;
4842 break;
4843 case GT:
4844 if (val >= mmax)
4845 return const0_rtx;
4846 if (val < mmin)
4847 return const_true_rtx;
4848 break;
4850 /* x < y is always false for y <= mmin, always true for y > mmax. */
4851 case LTU:
4852 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4853 return const0_rtx;
4854 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4855 return const_true_rtx;
4856 break;
4857 case LT:
4858 if (val <= mmin)
4859 return const0_rtx;
4860 if (val > mmax)
4861 return const_true_rtx;
4862 break;
4864 case NE:
4865 /* x != y is always true for y out of range. */
4866 if (val < mmin || val > mmax)
4867 return const_true_rtx;
4868 break;
4870 default:
4871 break;
4875 /* Optimize integer comparisons with zero. */
4876 if (trueop1 == const0_rtx)
4878 /* Some addresses are known to be nonzero. We don't know
4879 their sign, but equality comparisons are known. */
4880 if (nonzero_address_p (trueop0))
4882 if (code == EQ || code == LEU)
4883 return const0_rtx;
4884 if (code == NE || code == GTU)
4885 return const_true_rtx;
4888 /* See if the first operand is an IOR with a constant. If so, we
4889 may be able to determine the result of this comparison. */
4890 if (GET_CODE (op0) == IOR)
4892 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4893 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4895 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4896 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4897 && (UINTVAL (inner_const)
4898 & ((unsigned HOST_WIDE_INT) 1
4899 << sign_bitnum)));
4901 switch (code)
4903 case EQ:
4904 case LEU:
4905 return const0_rtx;
4906 case NE:
4907 case GTU:
4908 return const_true_rtx;
4909 case LT:
4910 case LE:
4911 if (has_sign)
4912 return const_true_rtx;
4913 break;
4914 case GT:
4915 case GE:
4916 if (has_sign)
4917 return const0_rtx;
4918 break;
4919 default:
4920 break;
4926 /* Optimize comparison of ABS with zero. */
4927 if (trueop1 == CONST0_RTX (mode)
4928 && (GET_CODE (trueop0) == ABS
4929 || (GET_CODE (trueop0) == FLOAT_EXTEND
4930 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4932 switch (code)
4934 case LT:
4935 /* Optimize abs(x) < 0.0. */
4936 if (!HONOR_SNANS (mode)
4937 && (!INTEGRAL_MODE_P (mode)
4938 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4940 if (INTEGRAL_MODE_P (mode)
4941 && (issue_strict_overflow_warning
4942 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4943 warning (OPT_Wstrict_overflow,
4944 ("assuming signed overflow does not occur when "
4945 "assuming abs (x) < 0 is false"));
4946 return const0_rtx;
4948 break;
4950 case GE:
4951 /* Optimize abs(x) >= 0.0. */
4952 if (!HONOR_NANS (mode)
4953 && (!INTEGRAL_MODE_P (mode)
4954 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4956 if (INTEGRAL_MODE_P (mode)
4957 && (issue_strict_overflow_warning
4958 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4959 warning (OPT_Wstrict_overflow,
4960 ("assuming signed overflow does not occur when "
4961 "assuming abs (x) >= 0 is true"));
4962 return const_true_rtx;
4964 break;
4966 case UNGE:
4967 /* Optimize ! (abs(x) < 0.0). */
4968 return const_true_rtx;
4970 default:
4971 break;
4975 return 0;
4978 /* Simplify CODE, an operation with result mode MODE and three operands,
4979 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4980 a constant. Return 0 if no simplification is possible. */
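/* Illustrative standalone sketch, not part of simplify-rtx.c (the
   toy_extract name is invented for this example).  It mirrors the
   ZERO_EXTRACT / SIGN_EXTRACT case handled inside the function below:
   a LEN-bit field at bit position POS of a constant is shifted down,
   masked, and, for SIGN_EXTRACT, sign-extended.  LEN is assumed to be
   smaller than the width of long.  */

#include <stdio.h>

static long
toy_extract (unsigned long val, int len, int pos, int sign_extract)
{
  val >>= pos;                          /* Drop the low POS bits.  */
  val &= (1ul << len) - 1;              /* Zero-extend to LEN bits.  */
  if (sign_extract && (val & (1ul << (len - 1))))
    return (long) val - (1l << len);    /* Propagate the sign bit.  */
  return (long) val;
}

int
main (void)
{
  /* The 4-bit field at bit 4 of 0xf5 is 0xf: 15 when zero-extended,
     -1 when sign-extended.  */
  printf ("%ld %ld\n",
          toy_extract (0xf5, 4, 4, 0),
          toy_extract (0xf5, 4, 4, 1));
  return 0;
}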
4983 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
4984 machine_mode op0_mode, rtx op0, rtx op1,
4985 rtx op2)
4987 unsigned int width = GET_MODE_PRECISION (mode);
4988 bool any_change = false;
4989 rtx tem, trueop2;
4991 /* VOIDmode means "infinite" precision. */
4992 if (width == 0)
4993 width = HOST_BITS_PER_WIDE_INT;
4995 switch (code)
4997 case FMA:
4998 /* Simplify negations around the multiplication. */
4999 /* -a * -b + c => a * b + c. */
5000 if (GET_CODE (op0) == NEG)
5002 tem = simplify_unary_operation (NEG, mode, op1, mode);
5003 if (tem)
5004 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5006 else if (GET_CODE (op1) == NEG)
5008 tem = simplify_unary_operation (NEG, mode, op0, mode);
5009 if (tem)
5010 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5013 /* Canonicalize the two multiplication operands. */
5014 /* a * -b + c => -b * a + c. */
5015 if (swap_commutative_operands_p (op0, op1))
5016 tem = op0, op0 = op1, op1 = tem, any_change = true;
5018 if (any_change)
5019 return gen_rtx_FMA (mode, op0, op1, op2);
5020 return NULL_RTX;
5022 case SIGN_EXTRACT:
5023 case ZERO_EXTRACT:
5024 if (CONST_INT_P (op0)
5025 && CONST_INT_P (op1)
5026 && CONST_INT_P (op2)
5027 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5028 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5030 /* Extracting a bit-field from a constant */
5031 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5032 HOST_WIDE_INT op1val = INTVAL (op1);
5033 HOST_WIDE_INT op2val = INTVAL (op2);
5034 if (BITS_BIG_ENDIAN)
5035 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5036 else
5037 val >>= op2val;
5039 if (HOST_BITS_PER_WIDE_INT != op1val)
5041 /* First zero-extend. */
5042 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5043 /* If desired, propagate sign bit. */
5044 if (code == SIGN_EXTRACT
5045 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5046 != 0)
5047 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5050 return gen_int_mode (val, mode);
5052 break;
5054 case IF_THEN_ELSE:
5055 if (CONST_INT_P (op0))
5056 return op0 != const0_rtx ? op1 : op2;
5058 /* Convert c ? a : a into "a". */
5059 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5060 return op1;
5062 /* Convert a != b ? a : b into "a". */
5063 if (GET_CODE (op0) == NE
5064 && ! side_effects_p (op0)
5065 && ! HONOR_NANS (mode)
5066 && ! HONOR_SIGNED_ZEROS (mode)
5067 && ((rtx_equal_p (XEXP (op0, 0), op1)
5068 && rtx_equal_p (XEXP (op0, 1), op2))
5069 || (rtx_equal_p (XEXP (op0, 0), op2)
5070 && rtx_equal_p (XEXP (op0, 1), op1))))
5071 return op1;
5073 /* Convert a == b ? a : b into "b". */
5074 if (GET_CODE (op0) == EQ
5075 && ! side_effects_p (op0)
5076 && ! HONOR_NANS (mode)
5077 && ! HONOR_SIGNED_ZEROS (mode)
5078 && ((rtx_equal_p (XEXP (op0, 0), op1)
5079 && rtx_equal_p (XEXP (op0, 1), op2))
5080 || (rtx_equal_p (XEXP (op0, 0), op2)
5081 && rtx_equal_p (XEXP (op0, 1), op1))))
5082 return op2;
5084 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5086 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5087 ? GET_MODE (XEXP (op0, 1))
5088 : GET_MODE (XEXP (op0, 0)));
5089 rtx temp;
5091 /* Look for happy constants in op1 and op2. */
5092 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5094 HOST_WIDE_INT t = INTVAL (op1);
5095 HOST_WIDE_INT f = INTVAL (op2);
5097 if (t == STORE_FLAG_VALUE && f == 0)
5098 code = GET_CODE (op0);
5099 else if (t == 0 && f == STORE_FLAG_VALUE)
5101 enum rtx_code tmp;
5102 tmp = reversed_comparison_code (op0, NULL_RTX);
5103 if (tmp == UNKNOWN)
5104 break;
5105 code = tmp;
5107 else
5108 break;
5110 return simplify_gen_relational (code, mode, cmp_mode,
5111 XEXP (op0, 0), XEXP (op0, 1));
5114 if (cmp_mode == VOIDmode)
5115 cmp_mode = op0_mode;
5116 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5117 cmp_mode, XEXP (op0, 0),
5118 XEXP (op0, 1));
5120 /* See if any simplifications were possible. */
5121 if (temp)
5123 if (CONST_INT_P (temp))
5124 return temp == const0_rtx ? op2 : op1;
5125 else if (temp)
5126 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5129 break;
5131 case VEC_MERGE:
5132 gcc_assert (GET_MODE (op0) == mode);
5133 gcc_assert (GET_MODE (op1) == mode);
5134 gcc_assert (VECTOR_MODE_P (mode));
5135 trueop2 = avoid_constant_pool_reference (op2);
5136 if (CONST_INT_P (trueop2))
5138 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5139 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5140 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5141 unsigned HOST_WIDE_INT mask;
5142 if (n_elts == HOST_BITS_PER_WIDE_INT)
5143 mask = -1;
5144 else
5145 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5147 if (!(sel & mask) && !side_effects_p (op0))
5148 return op1;
5149 if ((sel & mask) == mask && !side_effects_p (op1))
5150 return op0;
5152 rtx trueop0 = avoid_constant_pool_reference (op0);
5153 rtx trueop1 = avoid_constant_pool_reference (op1);
5154 if (GET_CODE (trueop0) == CONST_VECTOR
5155 && GET_CODE (trueop1) == CONST_VECTOR)
5157 rtvec v = rtvec_alloc (n_elts);
5158 unsigned int i;
5160 for (i = 0; i < n_elts; i++)
5161 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5162 ? CONST_VECTOR_ELT (trueop0, i)
5163 : CONST_VECTOR_ELT (trueop1, i));
5164 return gen_rtx_CONST_VECTOR (mode, v);
5167 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5168 if no element from a appears in the result. */
5169 if (GET_CODE (op0) == VEC_MERGE)
5171 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5172 if (CONST_INT_P (tem))
5174 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5175 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5176 return simplify_gen_ternary (code, mode, mode,
5177 XEXP (op0, 1), op1, op2);
5178 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5179 return simplify_gen_ternary (code, mode, mode,
5180 XEXP (op0, 0), op1, op2);
5183 if (GET_CODE (op1) == VEC_MERGE)
5185 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5186 if (CONST_INT_P (tem))
5188 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5189 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5190 return simplify_gen_ternary (code, mode, mode,
5191 op0, XEXP (op1, 1), op2);
5192 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5193 return simplify_gen_ternary (code, mode, mode,
5194 op0, XEXP (op1, 0), op2);
5199 if (rtx_equal_p (op0, op1)
5200 && !side_effects_p (op2) && !side_effects_p (op1))
5201 return op0;
5203 break;
5205 default:
5206 gcc_unreachable ();
5209 return 0;
5212 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5213 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5214 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5216 Works by unpacking OP into a collection of 8-bit values
5217 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5218 and then repacking them again for OUTERMODE. */
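/* Illustrative standalone sketch, not part of simplify-rtx.c (the
   toy_immed_subreg name is invented for this example).  It shows the
   unpack / select / repack idea on a plain uint32_t, ignoring the
   word- and byte-endianness corrections that the real code below has
   to apply: the inner value is split into little-endian bytes, and an
   outer-mode-sized piece is rebuilt starting at byte offset BYTE.  */

#include <stdio.h>
#include <stdint.h>

static uint32_t
toy_immed_subreg (uint32_t value, unsigned inner_bytes,
                  unsigned outer_bytes, unsigned byte)
{
  unsigned char buf[8];
  uint32_t result = 0;

  for (unsigned i = 0; i < inner_bytes; i++)
    buf[i] = (value >> (8 * i)) & 0xff;                 /* Unpack.  */
  for (unsigned i = 0; i < outer_bytes; i++)
    result |= (uint32_t) buf[byte + i] << (8 * i);      /* Repack.  */
  return result;
}

int
main (void)
{
  /* The 2-byte piece of 0x12345678 at byte offset 2 is 0x1234 in this
     little-endian byte numbering.  */
  printf ("0x%04x\n", (unsigned) toy_immed_subreg (0x12345678u, 4, 2, 2));
  return 0;
}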
5220 static rtx
5221 simplify_immed_subreg (machine_mode outermode, rtx op,
5222 machine_mode innermode, unsigned int byte)
5224 enum {
5225 value_bit = 8,
5226 value_mask = (1 << value_bit) - 1
5228 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5229 int value_start;
5230 int i;
5231 int elem;
5233 int num_elem;
5234 rtx * elems;
5235 int elem_bitsize;
5236 rtx result_s;
5237 rtvec result_v = NULL;
5238 enum mode_class outer_class;
5239 machine_mode outer_submode;
5240 int max_bitsize;
5242 /* Some ports misuse CCmode. */
5243 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5244 return op;
5246 /* We have no way to represent a complex constant at the rtl level. */
5247 if (COMPLEX_MODE_P (outermode))
5248 return NULL_RTX;
5250 /* We support any size mode. */
5251 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5252 GET_MODE_BITSIZE (innermode));
5254 /* Unpack the value. */
5256 if (GET_CODE (op) == CONST_VECTOR)
5258 num_elem = CONST_VECTOR_NUNITS (op);
5259 elems = &CONST_VECTOR_ELT (op, 0);
5260 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5262 else
5264 num_elem = 1;
5265 elems = &op;
5266 elem_bitsize = max_bitsize;
5268 /* If this asserts, it is too complicated; reducing value_bit may help. */
5269 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5270 /* I don't know how to handle endianness of sub-units. */
5271 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5273 for (elem = 0; elem < num_elem; elem++)
5275 unsigned char * vp;
5276 rtx el = elems[elem];
5278 /* Vectors are kept in target memory order. (This is probably
5279 a mistake.) */
5281 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5282 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5283 / BITS_PER_UNIT);
5284 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5285 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5286 unsigned bytele = (subword_byte % UNITS_PER_WORD
5287 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5288 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5291 switch (GET_CODE (el))
5293 case CONST_INT:
5294 for (i = 0;
5295 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5296 i += value_bit)
5297 *vp++ = INTVAL (el) >> i;
5298 /* CONST_INTs are always logically sign-extended. */
5299 for (; i < elem_bitsize; i += value_bit)
5300 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5301 break;
5303 case CONST_WIDE_INT:
5305 rtx_mode_t val = std::make_pair (el, innermode);
5306 unsigned char extend = wi::sign_mask (val);
5308 for (i = 0; i < elem_bitsize; i += value_bit)
5309 *vp++ = wi::extract_uhwi (val, i, value_bit);
5310 for (; i < elem_bitsize; i += value_bit)
5311 *vp++ = extend;
5313 break;
5315 case CONST_DOUBLE:
5316 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5318 unsigned char extend = 0;
5319 /* If this triggers, someone should have generated a
5320 CONST_INT instead. */
5321 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5323 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5324 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5325 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5327 *vp++
5328 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5329 i += value_bit;
5332 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5333 extend = -1;
5334 for (; i < elem_bitsize; i += value_bit)
5335 *vp++ = extend;
5337 else
5339 /* This is big enough for anything on the platform. */
5340 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5341 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5343 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5344 gcc_assert (bitsize <= elem_bitsize);
5345 gcc_assert (bitsize % value_bit == 0);
5347 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5348 GET_MODE (el));
5350 /* real_to_target produces its result in words affected by
5351 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5352 and use WORDS_BIG_ENDIAN instead; see the documentation
5353 of SUBREG in rtl.texi. */
5354 for (i = 0; i < bitsize; i += value_bit)
5356 int ibase;
5357 if (WORDS_BIG_ENDIAN)
5358 ibase = bitsize - 1 - i;
5359 else
5360 ibase = i;
5361 *vp++ = tmp[ibase / 32] >> i % 32;
5364 /* It shouldn't matter what's done here, so fill it with
5365 zero. */
5366 for (; i < elem_bitsize; i += value_bit)
5367 *vp++ = 0;
5369 break;
5371 case CONST_FIXED:
5372 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5374 for (i = 0; i < elem_bitsize; i += value_bit)
5375 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5377 else
5379 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5380 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5381 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5382 i += value_bit)
5383 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5384 >> (i - HOST_BITS_PER_WIDE_INT);
5385 for (; i < elem_bitsize; i += value_bit)
5386 *vp++ = 0;
5388 break;
5390 default:
5391 gcc_unreachable ();
5395 /* Now, pick the right byte to start with. */
5396 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5397 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5398 will already have offset 0. */
5399 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5401 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5402 - byte);
5403 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5404 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5405 byte = (subword_byte % UNITS_PER_WORD
5406 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5409 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5410 so if it's become negative it will instead be very large.) */
5411 gcc_assert (byte < GET_MODE_SIZE (innermode));
5413 /* Convert from bytes to chunks of size value_bit. */
5414 value_start = byte * (BITS_PER_UNIT / value_bit);
5416 /* Re-pack the value. */
5418 if (VECTOR_MODE_P (outermode))
5420 num_elem = GET_MODE_NUNITS (outermode);
5421 result_v = rtvec_alloc (num_elem);
5422 elems = &RTVEC_ELT (result_v, 0);
5423 outer_submode = GET_MODE_INNER (outermode);
5425 else
5427 num_elem = 1;
5428 elems = &result_s;
5429 outer_submode = outermode;
5432 outer_class = GET_MODE_CLASS (outer_submode);
5433 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5435 gcc_assert (elem_bitsize % value_bit == 0);
5436 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5438 for (elem = 0; elem < num_elem; elem++)
5440 unsigned char *vp;
5442 /* Vectors are stored in target memory order. (This is probably
5443 a mistake.) */
5445 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5446 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5447 / BITS_PER_UNIT);
5448 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5449 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5450 unsigned bytele = (subword_byte % UNITS_PER_WORD
5451 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5452 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5455 switch (outer_class)
5457 case MODE_INT:
5458 case MODE_PARTIAL_INT:
5460 int u;
5461 int base = 0;
5462 int units
5463 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5464 / HOST_BITS_PER_WIDE_INT;
5465 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5466 wide_int r;
5468 for (u = 0; u < units; u++)
5470 unsigned HOST_WIDE_INT buf = 0;
5471 for (i = 0;
5472 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5473 i += value_bit)
5474 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5476 tmp[u] = buf;
5477 base += HOST_BITS_PER_WIDE_INT;
5479 gcc_assert (GET_MODE_PRECISION (outer_submode)
5480 <= MAX_BITSIZE_MODE_ANY_INT);
5481 r = wide_int::from_array (tmp, units,
5482 GET_MODE_PRECISION (outer_submode));
5483 elems[elem] = immed_wide_int_const (r, outer_submode);
5485 break;
5487 case MODE_FLOAT:
5488 case MODE_DECIMAL_FLOAT:
5490 REAL_VALUE_TYPE r;
5491 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5493 /* real_from_target wants its input in words affected by
5494 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5495 and use WORDS_BIG_ENDIAN instead; see the documentation
5496 of SUBREG in rtl.texi. */
5497 for (i = 0; i < max_bitsize / 32; i++)
5498 tmp[i] = 0;
5499 for (i = 0; i < elem_bitsize; i += value_bit)
5501 int ibase;
5502 if (WORDS_BIG_ENDIAN)
5503 ibase = elem_bitsize - 1 - i;
5504 else
5505 ibase = i;
5506 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5509 real_from_target (&r, tmp, outer_submode);
5510 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5512 break;
5514 case MODE_FRACT:
5515 case MODE_UFRACT:
5516 case MODE_ACCUM:
5517 case MODE_UACCUM:
5519 FIXED_VALUE_TYPE f;
5520 f.data.low = 0;
5521 f.data.high = 0;
5522 f.mode = outer_submode;
5524 for (i = 0;
5525 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5526 i += value_bit)
5527 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5528 for (; i < elem_bitsize; i += value_bit)
5529 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5530 << (i - HOST_BITS_PER_WIDE_INT));
5532 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5534 break;
5536 default:
5537 gcc_unreachable ();
5540 if (VECTOR_MODE_P (outermode))
5541 return gen_rtx_CONST_VECTOR (outermode, result_v);
5542 else
5543 return result_s;
5546 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5547 Return 0 if no simplifications are possible. */
5549 simplify_subreg (machine_mode outermode, rtx op,
5550 machine_mode innermode, unsigned int byte)
5552 /* Little bit of sanity checking. */
5553 gcc_assert (innermode != VOIDmode);
5554 gcc_assert (outermode != VOIDmode);
5555 gcc_assert (innermode != BLKmode);
5556 gcc_assert (outermode != BLKmode);
5558 gcc_assert (GET_MODE (op) == innermode
5559 || GET_MODE (op) == VOIDmode);
5561 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5562 return NULL_RTX;
5564 if (byte >= GET_MODE_SIZE (innermode))
5565 return NULL_RTX;
5567 if (outermode == innermode && !byte)
5568 return op;
5570 if (CONST_SCALAR_INT_P (op)
5571 || CONST_DOUBLE_AS_FLOAT_P (op)
5572 || GET_CODE (op) == CONST_FIXED
5573 || GET_CODE (op) == CONST_VECTOR)
5574 return simplify_immed_subreg (outermode, op, innermode, byte);
5576 /* Changing mode twice with SUBREG => just change it once,
5577 or not at all if changing back to op's starting mode. */
5578 if (GET_CODE (op) == SUBREG)
5580 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5581 int final_offset = byte + SUBREG_BYTE (op);
5582 rtx newx;
5584 if (outermode == innermostmode
5585 && byte == 0 && SUBREG_BYTE (op) == 0)
5586 return SUBREG_REG (op);
5588 /* The SUBREG_BYTE represents the offset, as if the value were stored
5589 in memory. An irritating exception is the paradoxical subreg, where
5590 we define SUBREG_BYTE to be 0. On big endian machines, this
5591 value would otherwise be negative. For a moment, undo this exception. */
5592 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5594 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5595 if (WORDS_BIG_ENDIAN)
5596 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5597 if (BYTES_BIG_ENDIAN)
5598 final_offset += difference % UNITS_PER_WORD;
5600 if (SUBREG_BYTE (op) == 0
5601 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5603 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5604 if (WORDS_BIG_ENDIAN)
5605 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5606 if (BYTES_BIG_ENDIAN)
5607 final_offset += difference % UNITS_PER_WORD;
5610 /* See whether resulting subreg will be paradoxical. */
5611 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5613 /* In nonparadoxical subregs we can't handle negative offsets. */
5614 if (final_offset < 0)
5615 return NULL_RTX;
5616 /* Bail out in case resulting subreg would be incorrect. */
5617 if (final_offset % GET_MODE_SIZE (outermode)
5618 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5619 return NULL_RTX;
5621 else
5623 int offset = 0;
5624 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5626 /* In a paradoxical subreg, see if we are still looking at the lower part.
5627 If so, our SUBREG_BYTE will be 0. */
5628 if (WORDS_BIG_ENDIAN)
5629 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5630 if (BYTES_BIG_ENDIAN)
5631 offset += difference % UNITS_PER_WORD;
5632 if (offset == final_offset)
5633 final_offset = 0;
5634 else
5635 return NULL_RTX;
5638 /* Recurse for further possible simplifications. */
5639 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5640 final_offset);
5641 if (newx)
5642 return newx;
5643 if (validate_subreg (outermode, innermostmode,
5644 SUBREG_REG (op), final_offset))
5646 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5647 if (SUBREG_PROMOTED_VAR_P (op)
5648 && SUBREG_PROMOTED_SIGN (op) >= 0
5649 && GET_MODE_CLASS (outermode) == MODE_INT
5650 && IN_RANGE (GET_MODE_SIZE (outermode),
5651 GET_MODE_SIZE (innermode),
5652 GET_MODE_SIZE (innermostmode))
5653 && subreg_lowpart_p (newx))
5655 SUBREG_PROMOTED_VAR_P (newx) = 1;
5656 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5658 return newx;
5660 return NULL_RTX;
5663 /* SUBREG of a hard register => just change the register number
5664 and/or mode. If the hard register is not valid in that mode,
5665 suppress this simplification. If the hard register is the stack,
5666 frame, or argument pointer, leave this as a SUBREG. */
5668 if (REG_P (op) && HARD_REGISTER_P (op))
5670 unsigned int regno, final_regno;
5672 regno = REGNO (op);
5673 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5674 if (HARD_REGISTER_NUM_P (final_regno))
5676 rtx x;
5677 int final_offset = byte;
5679 /* Adjust offset for paradoxical subregs. */
5680 if (byte == 0
5681 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5683 int difference = (GET_MODE_SIZE (innermode)
5684 - GET_MODE_SIZE (outermode));
5685 if (WORDS_BIG_ENDIAN)
5686 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5687 if (BYTES_BIG_ENDIAN)
5688 final_offset += difference % UNITS_PER_WORD;
5691 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5693 /* Propagate the original regno. We don't have any way to specify
5694 the offset inside the original regno, so do so only for the lowpart.
5695 The information is used only by alias analysis, which cannot
5696 grok partial registers anyway. */
5698 if (subreg_lowpart_offset (outermode, innermode) == byte)
5699 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5700 return x;
5704 /* If we have a SUBREG of a register that we are replacing and we are
5705 replacing it with a MEM, make a new MEM and try replacing the
5706 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5707 or if we would be widening it. */
5709 if (MEM_P (op)
5710 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5711 /* Allow splitting of volatile memory references in case we don't
5712 have an instruction to move the whole thing. */
5713 && (! MEM_VOLATILE_P (op)
5714 || ! have_insn_for (SET, innermode))
5715 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5716 return adjust_address_nv (op, outermode, byte);
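/* For example (illustrative operands; assuming the address is not
   mode-dependent and the MEM is not volatile), the MEM case above rewrites
   (subreg:SI (mem:DI (reg:SI addr)) 4) as
   (mem:SI (plus:SI (reg:SI addr) (const_int 4))).  */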
5718 /* Handle complex values represented as CONCAT
5719 of real and imaginary part. */
5720 if (GET_CODE (op) == CONCAT)
5722 unsigned int part_size, final_offset;
5723 rtx part, res;
5725 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5726 if (byte < part_size)
5728 part = XEXP (op, 0);
5729 final_offset = byte;
5731 else
5733 part = XEXP (op, 1);
5734 final_offset = byte - part_size;
5737 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5738 return NULL_RTX;
5740 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5741 if (res)
5742 return res;
5743 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5744 return gen_rtx_SUBREG (outermode, part, final_offset);
5745 return NULL_RTX;
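/* For example (illustrative operands; assuming 4-byte SFmode), the CONCAT
   case above takes (subreg:SF (concat:SC (reg:SF r) (reg:SF i)) 4), selects
   the imaginary half, and simplifies it to (reg:SF i).  */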
5748 /* A SUBREG resulting from a zero extension may fold to zero if
5749 it extracts higher bits than the ZERO_EXTEND's source provides. */
5750 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5752 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5753 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5754 return CONST0_RTX (outermode);
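/* For example (illustrative operands; assuming a little-endian target), the
   zero-extension case above folds (subreg:SI (zero_extend:DI (reg:SI x)) 4)
   to (const_int 0), because the subreg reads only bits that the zero
   extension cleared.

   A lowpart subreg that merely narrows a scalar integer value behaves like a
   TRUNCATE, so the check below gives simplify_truncation a chance; e.g. the
   lowpart (subreg:HI (sign_extend:SI (reg:HI x)) 0) can fold back to
   (reg:HI x).  */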
5757 if (SCALAR_INT_MODE_P (outermode)
5758 && SCALAR_INT_MODE_P (innermode)
5759 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5760 && byte == subreg_lowpart_offset (outermode, innermode))
5762 rtx tem = simplify_truncation (outermode, op, innermode);
5763 if (tem)
5764 return tem;
5767 return NULL_RTX;
5770 /* Make a SUBREG operation or equivalent if it folds. */
5772 rtx
5773 simplify_gen_subreg (machine_mode outermode, rtx op,
5774 machine_mode innermode, unsigned int byte)
5776 rtx newx;
5778 newx = simplify_subreg (outermode, op, innermode, byte);
5779 if (newx)
5780 return newx;
5782 if (GET_CODE (op) == SUBREG
5783 || GET_CODE (op) == CONCAT
5784 || GET_MODE (op) == VOIDmode)
5785 return NULL_RTX;
5787 if (validate_subreg (outermode, innermode, op, byte))
5788 return gen_rtx_SUBREG (outermode, op, byte);
5790 return NULL_RTX;
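/* A minimal usage sketch of simplify_gen_subreg (illustrative only; the
   helper name and the DImode/SImode pairing are assumptions, not part of
   this file): fetch the low word of a DImode value as an SImode rtx,
   getting NULL_RTX back if no valid subreg can be formed.  */

static rtx
sketch_extract_low_word (rtx op)
{
  /* subreg_lowpart_offset picks the byte offset of the low part for the
     target's endianness, so this works for either byte order.  */
  return simplify_gen_subreg (SImode, op, DImode,
			      subreg_lowpart_offset (SImode, DImode));
}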
5793 /* Simplify X, an rtx expression.
5795 Return the simplified expression or NULL if no simplifications
5796 were possible.
5798 This is the preferred entry point into the simplification routines;
5799 however, we still allow passes to call the more specific routines.
5801 Right now GCC has three (yes, three) major bodies of RTL simplification
5802 code that need to be unified.
5804 1. fold_rtx in cse.c. This code uses various CSE-specific
5805 information to aid in RTL simplification.
5807 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5808 it uses combine-specific information to aid in RTL
5809 simplification.
5811 3. The routines in this file.
5814 In the long term we want to have only one body of simplification code;
5815 to get to that state I recommend the following steps:
5817 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5818 that do not depend on pass-specific state into these routines.
5820 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5821 use this routine whenever possible.
5823 3. Allow for pass-dependent state to be provided to these
5824 routines and add simplifications based on the pass-dependent
5825 state. Remove code from cse.c & combine.c that becomes
5826 redundant/dead.
5828 It will take time, but ultimately the compiler will be easier to
5829 maintain and improve. It's totally silly that when we add a
5830 simplification it needs to be added to 4 places (3 for RTL
5831 simplification and 1 for tree simplification). */
5833 rtx
5834 simplify_rtx (const_rtx x)
5836 const enum rtx_code code = GET_CODE (x);
5837 const machine_mode mode = GET_MODE (x);
5839 switch (GET_RTX_CLASS (code))
5841 case RTX_UNARY:
5842 return simplify_unary_operation (code, mode,
5843 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5844 case RTX_COMM_ARITH:
5845 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5846 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5848 /* Fall through.... */
5850 case RTX_BIN_ARITH:
5851 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5853 case RTX_TERNARY:
5854 case RTX_BITFIELD_OPS:
5855 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5856 XEXP (x, 0), XEXP (x, 1),
5857 XEXP (x, 2));
5859 case RTX_COMPARE:
5860 case RTX_COMM_COMPARE:
5861 return simplify_relational_operation (code, mode,
5862 ((GET_MODE (XEXP (x, 0))
5863 != VOIDmode)
5864 ? GET_MODE (XEXP (x, 0))
5865 : GET_MODE (XEXP (x, 1))),
5866 XEXP (x, 0),
5867 XEXP (x, 1));
5869 case RTX_EXTRA:
5870 if (code == SUBREG)
5871 return simplify_subreg (mode, SUBREG_REG (x),
5872 GET_MODE (SUBREG_REG (x)),
5873 SUBREG_BYTE (x));
5874 break;
5876 case RTX_OBJ:
5877 if (code == LO_SUM)
5879 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5880 if (GET_CODE (XEXP (x, 0)) == HIGH
5881 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5882 return XEXP (x, 1);
5884 break;
5886 default:
5887 break;
5889 return NULL;
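/* A minimal usage sketch of simplify_rtx (illustrative only; the helper
   name is an assumption, not part of this file): try to fold an rtx and
   keep the original expression when nothing simplifies.  For instance,
   (plus:SI (const_int 3) (const_int 4)) folds to (const_int 7), while a
   (plus:SI (reg:SI a) (reg:SI b)) of two distinct pseudos comes back
   unchanged.  */

static rtx
sketch_fold_or_keep (rtx x)
{
  rtx tem = simplify_rtx (x);
  return tem ? tem : x;
}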