1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "fold-const.h"
37 #include "varasm.h"
38 #include "tm_p.h"
39 #include "regs.h"
40 #include "hard-reg-set.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "recog.h"
44 #include "input.h"
45 #include "function.h"
46 #include "insn-codes.h"
47 #include "optabs.h"
48 #include "expr.h"
49 #include "diagnostic-core.h"
50 #include "ggc.h"
51 #include "target.h"
52 #include "predict.h"
54 /* Simplification and canonicalization of RTL. */
56 /* Much code operates on (low, high) pairs; the low value is an
57 unsigned wide int, the high value a signed wide int. We
58 occasionally need to sign extend from low to high as if low were a
59 signed wide int. */
60 #define HWI_SIGN_EXTEND(low) \
61 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
63 static rtx neg_const_int (machine_mode, const_rtx);
64 static bool plus_minus_operand_p (const_rtx);
65 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
66 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
67 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
68 unsigned int);
69 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
70 rtx, rtx);
71 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
72 machine_mode, rtx, rtx);
73 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
74 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
75 rtx, rtx, rtx, rtx);
77 /* Negate a CONST_INT rtx, truncating (because a conversion from a
78 maximally negative number can overflow). */
79 static rtx
80 neg_const_int (machine_mode mode, const_rtx i)
82 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
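/* For example, negating the most negative QImode value overflows:
   with I = (const_int -128), -(unsigned HOST_WIDE_INT) INTVAL (I)
   is 128, and gen_int_mode truncates and sign-extends that back to
   (const_int -128) in QImode.  */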
85 /* Test whether expression, X, is an immediate constant that represents
86 the most significant bit of machine mode MODE. */
88 bool
89 mode_signbit_p (machine_mode mode, const_rtx x)
91 unsigned HOST_WIDE_INT val;
92 unsigned int width;
94 if (GET_MODE_CLASS (mode) != MODE_INT)
95 return false;
97 width = GET_MODE_PRECISION (mode);
98 if (width == 0)
99 return false;
101 if (width <= HOST_BITS_PER_WIDE_INT
102 && CONST_INT_P (x))
103 val = INTVAL (x);
104 #if TARGET_SUPPORTS_WIDE_INT
105 else if (CONST_WIDE_INT_P (x))
107 unsigned int i;
108 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
109 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
110 return false;
111 for (i = 0; i < elts - 1; i++)
112 if (CONST_WIDE_INT_ELT (x, i) != 0)
113 return false;
114 val = CONST_WIDE_INT_ELT (x, elts - 1);
115 width %= HOST_BITS_PER_WIDE_INT;
116 if (width == 0)
117 width = HOST_BITS_PER_WIDE_INT;
119 #else
120 else if (width <= HOST_BITS_PER_DOUBLE_INT
121 && CONST_DOUBLE_AS_INT_P (x)
122 && CONST_DOUBLE_LOW (x) == 0)
124 val = CONST_DOUBLE_HIGH (x);
125 width -= HOST_BITS_PER_WIDE_INT;
127 #endif
128 else
129 /* X is not an integer constant. */
130 return false;
132 if (width < HOST_BITS_PER_WIDE_INT)
133 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
134 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
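/* For example, for 32-bit SImode this accepts only the constant whose
   low 32 bits are 0x80000000, i.e. the sign bit alone; any other bit
   pattern fails the equality check against 1 << (width - 1).  */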
137 /* Test whether VAL is equal to the most significant bit of mode MODE
138 (after masking with the mode mask of MODE). Returns false if the
139 precision of MODE is too large to handle. */
141 bool
142 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
144 unsigned int width;
146 if (GET_MODE_CLASS (mode) != MODE_INT)
147 return false;
149 width = GET_MODE_PRECISION (mode);
150 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
151 return false;
153 val &= GET_MODE_MASK (mode);
154 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
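/* E.g. val_signbit_p (QImode, 0x80) is true, and so is
   val_signbit_p (QImode, 0x180), because VAL is first reduced by the
   QImode mask 0xff; val_signbit_p (QImode, 0xc0) is false since bits
   other than the sign bit remain set.  */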
157 /* Test whether the most significant bit of mode MODE is set in VAL.
158 Returns false if the precision of MODE is too large to handle. */
159 bool
160 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
162 unsigned int width;
164 if (GET_MODE_CLASS (mode) != MODE_INT)
165 return false;
167 width = GET_MODE_PRECISION (mode);
168 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
169 return false;
171 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
172 return val != 0;
175 /* Test whether the most significant bit of mode MODE is clear in VAL.
176 Returns false if the precision of MODE is too large to handle. */
177 bool
178 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
180 unsigned int width;
182 if (GET_MODE_CLASS (mode) != MODE_INT)
183 return false;
185 width = GET_MODE_PRECISION (mode);
186 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
187 return false;
189 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
190 return val == 0;
193 /* Make a binary operation by properly ordering the operands and
194 seeing if the expression folds. */
197 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
198 rtx op1)
200 rtx tem;
202 /* If this simplifies, do it. */
203 tem = simplify_binary_operation (code, mode, op0, op1);
204 if (tem)
205 return tem;
207 /* Put complex operands first and constants second if commutative. */
208 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
209 && swap_commutative_operands_p (op0, op1))
210 tem = op0, op0 = op1, op1 = tem;
212 return gen_rtx_fmt_ee (code, mode, op0, op1);
215 /* If X is a MEM referencing the constant pool, return the real value.
216 Otherwise return X. */
218 avoid_constant_pool_reference (rtx x)
220 rtx c, tmp, addr;
221 machine_mode cmode;
222 HOST_WIDE_INT offset = 0;
224 switch (GET_CODE (x))
226 case MEM:
227 break;
229 case FLOAT_EXTEND:
230 /* Handle float extensions of constant pool references. */
231 tmp = XEXP (x, 0);
232 c = avoid_constant_pool_reference (tmp);
233 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
235 REAL_VALUE_TYPE d;
237 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
238 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
240 return x;
242 default:
243 return x;
246 if (GET_MODE (x) == BLKmode)
247 return x;
249 addr = XEXP (x, 0);
251 /* Call target hook to avoid the effects of -fpic etc.... */
252 addr = targetm.delegitimize_address (addr);
254 /* Split the address into a base and integer offset. */
255 if (GET_CODE (addr) == CONST
256 && GET_CODE (XEXP (addr, 0)) == PLUS
257 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
259 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
260 addr = XEXP (XEXP (addr, 0), 0);
263 if (GET_CODE (addr) == LO_SUM)
264 addr = XEXP (addr, 1);
266 /* If this is a constant pool reference, we can turn it into its
267 constant and hope that simplifications happen. */
268 if (GET_CODE (addr) == SYMBOL_REF
269 && CONSTANT_POOL_ADDRESS_P (addr))
271 c = get_pool_constant (addr);
272 cmode = get_pool_mode (addr);
274 /* If we're accessing the constant in a different mode than it was
275 originally stored, attempt to fix that up via subreg simplifications.
276 If that fails we have no choice but to return the original memory. */
277 if ((offset != 0 || cmode != GET_MODE (x))
278 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
280 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
281 if (tem && CONSTANT_P (tem))
282 return tem;
284 else
285 return c;
288 return x;
291 /* Simplify a MEM based on its attributes. This is the default
292 delegitimize_address target hook, and it's recommended that every
293 overrider call it. */
296 delegitimize_mem_from_attrs (rtx x)
298 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
299 use their base addresses as equivalent. */
300 if (MEM_P (x)
301 && MEM_EXPR (x)
302 && MEM_OFFSET_KNOWN_P (x))
304 tree decl = MEM_EXPR (x);
305 machine_mode mode = GET_MODE (x);
306 HOST_WIDE_INT offset = 0;
308 switch (TREE_CODE (decl))
310 default:
311 decl = NULL;
312 break;
314 case VAR_DECL:
315 break;
317 case ARRAY_REF:
318 case ARRAY_RANGE_REF:
319 case COMPONENT_REF:
320 case BIT_FIELD_REF:
321 case REALPART_EXPR:
322 case IMAGPART_EXPR:
323 case VIEW_CONVERT_EXPR:
325 HOST_WIDE_INT bitsize, bitpos;
326 tree toffset;
327 int unsignedp, volatilep = 0;
329 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
330 &mode, &unsignedp, &volatilep, false);
331 if (bitsize != GET_MODE_BITSIZE (mode)
332 || (bitpos % BITS_PER_UNIT)
333 || (toffset && !tree_fits_shwi_p (toffset)))
334 decl = NULL;
335 else
337 offset += bitpos / BITS_PER_UNIT;
338 if (toffset)
339 offset += tree_to_shwi (toffset);
341 break;
345 if (decl
346 && mode == GET_MODE (x)
347 && TREE_CODE (decl) == VAR_DECL
348 && (TREE_STATIC (decl)
349 || DECL_THREAD_LOCAL_P (decl))
350 && DECL_RTL_SET_P (decl)
351 && MEM_P (DECL_RTL (decl)))
353 rtx newx;
355 offset += MEM_OFFSET (x);
357 newx = DECL_RTL (decl);
359 if (MEM_P (newx))
361 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
363 /* Avoid creating a new MEM needlessly if we already had
364 the same address. We do if there's no OFFSET and the
365 old address X is identical to NEWX, or if X is of the
366 form (plus NEWX OFFSET), or the NEWX is of the form
367 (plus Y (const_int Z)) and X is that with the offset
368 added: (plus Y (const_int Z+OFFSET)). */
369 if (!((offset == 0
370 || (GET_CODE (o) == PLUS
371 && GET_CODE (XEXP (o, 1)) == CONST_INT
372 && (offset == INTVAL (XEXP (o, 1))
373 || (GET_CODE (n) == PLUS
374 && GET_CODE (XEXP (n, 1)) == CONST_INT
375 && (INTVAL (XEXP (n, 1)) + offset
376 == INTVAL (XEXP (o, 1)))
377 && (n = XEXP (n, 0))))
378 && (o = XEXP (o, 0))))
379 && rtx_equal_p (o, n)))
380 x = adjust_address_nv (newx, mode, offset);
382 else if (GET_MODE (x) == GET_MODE (newx)
383 && offset == 0)
384 x = newx;
388 return x;
391 /* Make a unary operation by first seeing if it folds and otherwise making
392 the specified operation. */
395 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
396 machine_mode op_mode)
398 rtx tem;
400 /* If this simplifies, use it. */
401 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
402 return tem;
404 return gen_rtx_fmt_e (code, mode, op);
407 /* Likewise for ternary operations. */
410 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
411 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
413 rtx tem;
415 /* If this simplifies, use it. */
416 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
417 op0, op1, op2)))
418 return tem;
420 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
423 /* Likewise, for relational operations.
424 CMP_MODE specifies mode comparison is done in. */
427 simplify_gen_relational (enum rtx_code code, machine_mode mode,
428 machine_mode cmp_mode, rtx op0, rtx op1)
430 rtx tem;
432 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
433 op0, op1)))
434 return tem;
436 return gen_rtx_fmt_ee (code, mode, op0, op1);
439 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
440 and simplify the result. If FN is non-NULL, call this callback on each
441 X; if it returns non-NULL, replace X with its return value and simplify the
442 result. */
445 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
446 rtx (*fn) (rtx, const_rtx, void *), void *data)
448 enum rtx_code code = GET_CODE (x);
449 machine_mode mode = GET_MODE (x);
450 machine_mode op_mode;
451 const char *fmt;
452 rtx op0, op1, op2, newx, op;
453 rtvec vec, newvec;
454 int i, j;
456 if (__builtin_expect (fn != NULL, 0))
458 newx = fn (x, old_rtx, data);
459 if (newx)
460 return newx;
462 else if (rtx_equal_p (x, old_rtx))
463 return copy_rtx ((rtx) data);
465 switch (GET_RTX_CLASS (code))
467 case RTX_UNARY:
468 op0 = XEXP (x, 0);
469 op_mode = GET_MODE (op0);
470 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
471 if (op0 == XEXP (x, 0))
472 return x;
473 return simplify_gen_unary (code, mode, op0, op_mode);
475 case RTX_BIN_ARITH:
476 case RTX_COMM_ARITH:
477 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
478 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
480 return x;
481 return simplify_gen_binary (code, mode, op0, op1);
483 case RTX_COMPARE:
484 case RTX_COMM_COMPARE:
485 op0 = XEXP (x, 0);
486 op1 = XEXP (x, 1);
487 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
488 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
489 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
490 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
491 return x;
492 return simplify_gen_relational (code, mode, op_mode, op0, op1);
494 case RTX_TERNARY:
495 case RTX_BITFIELD_OPS:
496 op0 = XEXP (x, 0);
497 op_mode = GET_MODE (op0);
498 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
500 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
502 return x;
503 if (op_mode == VOIDmode)
504 op_mode = GET_MODE (op0);
505 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
507 case RTX_EXTRA:
508 if (code == SUBREG)
510 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
511 if (op0 == SUBREG_REG (x))
512 return x;
513 op0 = simplify_gen_subreg (GET_MODE (x), op0,
514 GET_MODE (SUBREG_REG (x)),
515 SUBREG_BYTE (x));
516 return op0 ? op0 : x;
518 break;
520 case RTX_OBJ:
521 if (code == MEM)
523 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
524 if (op0 == XEXP (x, 0))
525 return x;
526 return replace_equiv_address_nv (x, op0);
528 else if (code == LO_SUM)
530 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
531 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
533 /* (lo_sum (high x) x) -> x */
534 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
535 return op1;
537 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
538 return x;
539 return gen_rtx_LO_SUM (mode, op0, op1);
541 break;
543 default:
544 break;
547 newx = x;
548 fmt = GET_RTX_FORMAT (code);
549 for (i = 0; fmt[i]; i++)
550 switch (fmt[i])
552 case 'E':
553 vec = XVEC (x, i);
554 newvec = XVEC (newx, i);
555 for (j = 0; j < GET_NUM_ELEM (vec); j++)
557 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
558 old_rtx, fn, data);
559 if (op != RTVEC_ELT (vec, j))
561 if (newvec == vec)
563 newvec = shallow_copy_rtvec (vec);
564 if (x == newx)
565 newx = shallow_copy_rtx (x);
566 XVEC (newx, i) = newvec;
568 RTVEC_ELT (newvec, j) = op;
571 break;
573 case 'e':
574 if (XEXP (x, i))
576 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
577 if (op != XEXP (x, i))
579 if (x == newx)
580 newx = shallow_copy_rtx (x);
581 XEXP (newx, i) = op;
584 break;
586 return newx;
589 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
590 resulting RTX. Return a new RTX which is as simplified as possible. */
593 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
595 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
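/* As a small illustration (with R standing for any pseudo register):
   replacing (reg:SI R) by (const_int 4) in (plus:SI (reg:SI R)
   (const_int 3)) does not just substitute; the rebuilt PLUS is fed
   back through simplify_gen_binary and folds to (const_int 7).  */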
598 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
599 Only handle cases where the truncated value is inherently an rvalue.
601 RTL provides two ways of truncating a value:
603 1. a lowpart subreg. This form is only a truncation when both
604 the outer and inner modes (here MODE and OP_MODE respectively)
605 are scalar integers, and only then when the subreg is used as
606 an rvalue.
608 It is only valid to form such truncating subregs if the
609 truncation requires no action by the target. The onus for
610 proving this is on the creator of the subreg -- e.g. the
611 caller to simplify_subreg or simplify_gen_subreg -- and typically
612 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
614 2. a TRUNCATE. This form handles both scalar and compound integers.
616 The first form is preferred where valid. However, the TRUNCATE
617 handling in simplify_unary_operation turns the second form into the
618 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
619 so it is generally safe to form rvalue truncations using:
621 simplify_gen_unary (TRUNCATE, ...)
623 and leave simplify_unary_operation to work out which representation
624 should be used.
626 Because of the proof requirements on (1), simplify_truncation must
627 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
628 regardless of whether the outer truncation came from a SUBREG or a
629 TRUNCATE. For example, if the caller has proven that an SImode
630 truncation of:
632 (and:DI X Y)
634 is a no-op and can be represented as a subreg, it does not follow
635 that SImode truncations of X and Y are also no-ops. On a target
636 like 64-bit MIPS that requires SImode values to be stored in
637 sign-extended form, an SImode truncation of:
639 (and:DI (reg:DI X) (const_int 63))
641 is trivially a no-op because only the lower 6 bits can be set.
642 However, X is still an arbitrary 64-bit number and so we cannot
643 assume that truncating it too is a no-op. */
645 static rtx
646 simplify_truncation (machine_mode mode, rtx op,
647 machine_mode op_mode)
649 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
650 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
651 gcc_assert (precision <= op_precision);
653 /* Optimize truncations of zero and sign extended values. */
654 if (GET_CODE (op) == ZERO_EXTEND
655 || GET_CODE (op) == SIGN_EXTEND)
657 /* There are three possibilities. If MODE is the same as the
658 origmode, we can omit both the extension and the subreg.
659 If MODE is not larger than the origmode, we can apply the
660 truncation without the extension. Finally, if the outermode
661 is larger than the origmode, we can just extend to the appropriate
662 mode. */
663 machine_mode origmode = GET_MODE (XEXP (op, 0));
664 if (mode == origmode)
665 return XEXP (op, 0);
666 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
667 return simplify_gen_unary (TRUNCATE, mode,
668 XEXP (op, 0), origmode);
669 else
670 return simplify_gen_unary (GET_CODE (op), mode,
671 XEXP (op, 0), origmode);
674 /* If the machine can perform operations in the truncated mode, distribute
675 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
676 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
677 if (1
678 #ifdef WORD_REGISTER_OPERATIONS
679 && precision >= BITS_PER_WORD
680 #endif
681 && (GET_CODE (op) == PLUS
682 || GET_CODE (op) == MINUS
683 || GET_CODE (op) == MULT))
685 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
686 if (op0)
688 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
689 if (op1)
690 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
 694 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 695 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op) == LSHIFTRT
698 || GET_CODE (op) == ASHIFTRT)
699 /* Ensure that OP_MODE is at least twice as wide as MODE
700 to avoid the possibility that an outer LSHIFTRT shifts by more
701 than the sign extension's sign_bit_copies and introduces zeros
702 into the high bits of the result. */
703 && 2 * precision <= op_precision
704 && CONST_INT_P (XEXP (op, 1))
705 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
706 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
707 && UINTVAL (XEXP (op, 1)) < precision)
708 return simplify_gen_binary (ASHIFTRT, mode,
709 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
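/* A concrete instance of the rule above: the QImode truncation of
   (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)) becomes
   (ashiftrt:QI (x:QI) (const_int 2)), since every bit shifted in from
   above is a copy of x's sign bit.  */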
 711 /* Likewise, simplify (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
 712 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
713 the outer subreg is effectively a truncation to the original mode. */
714 if ((GET_CODE (op) == LSHIFTRT
715 || GET_CODE (op) == ASHIFTRT)
716 && CONST_INT_P (XEXP (op, 1))
717 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
718 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
719 && UINTVAL (XEXP (op, 1)) < precision)
720 return simplify_gen_binary (LSHIFTRT, mode,
721 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
 723 /* Likewise, simplify (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
 724 into (ashift:QI (x:QI) C), where C is a suitable small constant and
725 the outer subreg is effectively a truncation to the original mode. */
726 if (GET_CODE (op) == ASHIFT
727 && CONST_INT_P (XEXP (op, 1))
728 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
729 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
730 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
731 && UINTVAL (XEXP (op, 1)) < precision)
732 return simplify_gen_binary (ASHIFT, mode,
733 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
735 /* Recognize a word extraction from a multi-word subreg. */
736 if ((GET_CODE (op) == LSHIFTRT
737 || GET_CODE (op) == ASHIFTRT)
738 && SCALAR_INT_MODE_P (mode)
739 && SCALAR_INT_MODE_P (op_mode)
740 && precision >= BITS_PER_WORD
741 && 2 * precision <= op_precision
742 && CONST_INT_P (XEXP (op, 1))
743 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
744 && UINTVAL (XEXP (op, 1)) < op_precision)
746 int byte = subreg_lowpart_offset (mode, op_mode);
747 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
748 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
749 (WORDS_BIG_ENDIAN
750 ? byte - shifted_bytes
751 : byte + shifted_bytes));
754 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
755 and try replacing the TRUNCATE and shift with it. Don't do this
756 if the MEM has a mode-dependent address. */
757 if ((GET_CODE (op) == LSHIFTRT
758 || GET_CODE (op) == ASHIFTRT)
759 && SCALAR_INT_MODE_P (op_mode)
760 && MEM_P (XEXP (op, 0))
761 && CONST_INT_P (XEXP (op, 1))
762 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
763 && INTVAL (XEXP (op, 1)) > 0
764 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
765 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
766 MEM_ADDR_SPACE (XEXP (op, 0)))
767 && ! MEM_VOLATILE_P (XEXP (op, 0))
768 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
769 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
771 int byte = subreg_lowpart_offset (mode, op_mode);
772 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
773 return adjust_address_nv (XEXP (op, 0), mode,
774 (WORDS_BIG_ENDIAN
775 ? byte - shifted_bytes
776 : byte + shifted_bytes));
779 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
780 (OP:SI foo:SI) if OP is NEG or ABS. */
781 if ((GET_CODE (op) == ABS
782 || GET_CODE (op) == NEG)
783 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
784 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
785 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
786 return simplify_gen_unary (GET_CODE (op), mode,
787 XEXP (XEXP (op, 0), 0), mode);
789 /* (truncate:A (subreg:B (truncate:C X) 0)) is
790 (truncate:A X). */
791 if (GET_CODE (op) == SUBREG
792 && SCALAR_INT_MODE_P (mode)
793 && SCALAR_INT_MODE_P (op_mode)
794 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
795 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
796 && subreg_lowpart_p (op))
798 rtx inner = XEXP (SUBREG_REG (op), 0);
799 if (GET_MODE_PRECISION (mode)
800 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
801 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
802 else
803 /* If subreg above is paradoxical and C is narrower
804 than A, return (subreg:A (truncate:C X) 0). */
805 return simplify_gen_subreg (mode, SUBREG_REG (op),
806 GET_MODE (SUBREG_REG (op)), 0);
809 /* (truncate:A (truncate:B X)) is (truncate:A X). */
810 if (GET_CODE (op) == TRUNCATE)
811 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
812 GET_MODE (XEXP (op, 0)));
814 return NULL_RTX;
817 /* Try to simplify a unary operation CODE whose output mode is to be
818 MODE with input operand OP whose mode was originally OP_MODE.
819 Return zero if no simplification can be made. */
821 simplify_unary_operation (enum rtx_code code, machine_mode mode,
822 rtx op, machine_mode op_mode)
824 rtx trueop, tem;
826 trueop = avoid_constant_pool_reference (op);
828 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
829 if (tem)
830 return tem;
832 return simplify_unary_operation_1 (code, mode, op);
835 /* Perform some simplifications we can do even if the operands
836 aren't constant. */
837 static rtx
838 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
840 enum rtx_code reversed;
841 rtx temp;
843 switch (code)
845 case NOT:
846 /* (not (not X)) == X. */
847 if (GET_CODE (op) == NOT)
848 return XEXP (op, 0);
850 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
851 comparison is all ones. */
852 if (COMPARISON_P (op)
853 && (mode == BImode || STORE_FLAG_VALUE == -1)
854 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
855 return simplify_gen_relational (reversed, mode, VOIDmode,
856 XEXP (op, 0), XEXP (op, 1));
858 /* (not (plus X -1)) can become (neg X). */
859 if (GET_CODE (op) == PLUS
860 && XEXP (op, 1) == constm1_rtx)
861 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
863 /* Similarly, (not (neg X)) is (plus X -1). */
864 if (GET_CODE (op) == NEG)
865 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
866 CONSTM1_RTX (mode));
868 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
869 if (GET_CODE (op) == XOR
870 && CONST_INT_P (XEXP (op, 1))
871 && (temp = simplify_unary_operation (NOT, mode,
872 XEXP (op, 1), mode)) != 0)
873 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
875 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
876 if (GET_CODE (op) == PLUS
877 && CONST_INT_P (XEXP (op, 1))
878 && mode_signbit_p (mode, XEXP (op, 1))
879 && (temp = simplify_unary_operation (NOT, mode,
880 XEXP (op, 1), mode)) != 0)
881 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
884 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
885 operands other than 1, but that is not valid. We could do a
886 similar simplification for (not (lshiftrt C X)) where C is
887 just the sign bit, but this doesn't seem common enough to
888 bother with. */
889 if (GET_CODE (op) == ASHIFT
890 && XEXP (op, 0) == const1_rtx)
892 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
893 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
896 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
897 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
898 so we can perform the above simplification. */
899 if (STORE_FLAG_VALUE == -1
900 && GET_CODE (op) == ASHIFTRT
901 && CONST_INT_P (XEXP (op, 1))
902 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
903 return simplify_gen_relational (GE, mode, VOIDmode,
904 XEXP (op, 0), const0_rtx);
907 if (GET_CODE (op) == SUBREG
908 && subreg_lowpart_p (op)
909 && (GET_MODE_SIZE (GET_MODE (op))
910 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
911 && GET_CODE (SUBREG_REG (op)) == ASHIFT
912 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
914 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
915 rtx x;
917 x = gen_rtx_ROTATE (inner_mode,
918 simplify_gen_unary (NOT, inner_mode, const1_rtx,
919 inner_mode),
920 XEXP (SUBREG_REG (op), 1));
921 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
922 if (temp)
923 return temp;
926 /* Apply De Morgan's laws to reduce number of patterns for machines
927 with negating logical insns (and-not, nand, etc.). If result has
928 only one NOT, put it first, since that is how the patterns are
929 coded. */
930 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
932 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
933 machine_mode op_mode;
935 op_mode = GET_MODE (in1);
936 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
938 op_mode = GET_MODE (in2);
939 if (op_mode == VOIDmode)
940 op_mode = mode;
941 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
943 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
945 rtx tem = in2;
946 in2 = in1; in1 = tem;
949 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
950 mode, in1, in2);
953 /* (not (bswap x)) -> (bswap (not x)). */
954 if (GET_CODE (op) == BSWAP)
956 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
957 return simplify_gen_unary (BSWAP, mode, x, mode);
959 break;
961 case NEG:
962 /* (neg (neg X)) == X. */
963 if (GET_CODE (op) == NEG)
964 return XEXP (op, 0);
966 /* (neg (plus X 1)) can become (not X). */
967 if (GET_CODE (op) == PLUS
968 && XEXP (op, 1) == const1_rtx)
969 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
971 /* Similarly, (neg (not X)) is (plus X 1). */
972 if (GET_CODE (op) == NOT)
973 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
974 CONST1_RTX (mode));
976 /* (neg (minus X Y)) can become (minus Y X). This transformation
977 isn't safe for modes with signed zeros, since if X and Y are
978 both +0, (minus Y X) is the same as (minus X Y). If the
979 rounding mode is towards +infinity (or -infinity) then the two
980 expressions will be rounded differently. */
981 if (GET_CODE (op) == MINUS
982 && !HONOR_SIGNED_ZEROS (mode)
983 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
986 if (GET_CODE (op) == PLUS
987 && !HONOR_SIGNED_ZEROS (mode)
988 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
990 /* (neg (plus A C)) is simplified to (minus -C A). */
991 if (CONST_SCALAR_INT_P (XEXP (op, 1))
992 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
994 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
995 if (temp)
996 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
999 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1004 /* (neg (mult A B)) becomes (mult A (neg B)).
1005 This works even for floating-point values. */
1006 if (GET_CODE (op) == MULT
1007 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1009 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1010 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1013 /* NEG commutes with ASHIFT since it is multiplication. Only do
1014 this if we can then eliminate the NEG (e.g., if the operand
1015 is a constant). */
1016 if (GET_CODE (op) == ASHIFT)
1018 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1019 if (temp)
1020 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1023 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1024 C is equal to the width of MODE minus 1. */
1025 if (GET_CODE (op) == ASHIFTRT
1026 && CONST_INT_P (XEXP (op, 1))
1027 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1028 return simplify_gen_binary (LSHIFTRT, mode,
1029 XEXP (op, 0), XEXP (op, 1));
1031 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1032 C is equal to the width of MODE minus 1. */
1033 if (GET_CODE (op) == LSHIFTRT
1034 && CONST_INT_P (XEXP (op, 1))
1035 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1036 return simplify_gen_binary (ASHIFTRT, mode,
1037 XEXP (op, 0), XEXP (op, 1));
1039 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1040 if (GET_CODE (op) == XOR
1041 && XEXP (op, 1) == const1_rtx
1042 && nonzero_bits (XEXP (op, 0), mode) == 1)
1043 return plus_constant (mode, XEXP (op, 0), -1);
1045 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1046 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1047 if (GET_CODE (op) == LT
1048 && XEXP (op, 1) == const0_rtx
1049 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1051 machine_mode inner = GET_MODE (XEXP (op, 0));
1052 int isize = GET_MODE_PRECISION (inner);
1053 if (STORE_FLAG_VALUE == 1)
1055 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1056 GEN_INT (isize - 1));
1057 if (mode == inner)
1058 return temp;
1059 if (GET_MODE_PRECISION (mode) > isize)
1060 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1061 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1063 else if (STORE_FLAG_VALUE == -1)
1065 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1066 GEN_INT (isize - 1));
1067 if (mode == inner)
1068 return temp;
1069 if (GET_MODE_PRECISION (mode) > isize)
1070 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1071 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1074 break;
1076 case TRUNCATE:
1077 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1078 with the umulXi3_highpart patterns. */
1079 if (GET_CODE (op) == LSHIFTRT
1080 && GET_CODE (XEXP (op, 0)) == MULT)
1081 break;
1083 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1085 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1087 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1088 if (temp)
1089 return temp;
1091 /* We can't handle truncation to a partial integer mode here
1092 because we don't know the real bitsize of the partial
1093 integer mode. */
1094 break;
1097 if (GET_MODE (op) != VOIDmode)
1099 temp = simplify_truncation (mode, op, GET_MODE (op));
1100 if (temp)
1101 return temp;
1104 /* If we know that the value is already truncated, we can
1105 replace the TRUNCATE with a SUBREG. */
1106 if (GET_MODE_NUNITS (mode) == 1
1107 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1108 || truncated_to_mode (mode, op)))
1110 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1111 if (temp)
1112 return temp;
1115 /* A truncate of a comparison can be replaced with a subreg if
1116 STORE_FLAG_VALUE permits. This is like the previous test,
1117 but it works even if the comparison is done in a mode larger
1118 than HOST_BITS_PER_WIDE_INT. */
1119 if (HWI_COMPUTABLE_MODE_P (mode)
1120 && COMPARISON_P (op)
1121 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1123 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1124 if (temp)
1125 return temp;
1128 /* A truncate of a memory is just loading the low part of the memory
1129 if we are not changing the meaning of the address. */
1130 if (GET_CODE (op) == MEM
1131 && !VECTOR_MODE_P (mode)
1132 && !MEM_VOLATILE_P (op)
1133 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1135 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1136 if (temp)
1137 return temp;
1140 break;
1142 case FLOAT_TRUNCATE:
1143 if (DECIMAL_FLOAT_MODE_P (mode))
1144 break;
1146 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1147 if (GET_CODE (op) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (op, 0)) == mode)
1149 return XEXP (op, 0);
1151 /* (float_truncate:SF (float_truncate:DF foo:XF))
1152 = (float_truncate:SF foo:XF).
1153 This may eliminate double rounding, so it is unsafe.
1155 (float_truncate:SF (float_extend:XF foo:DF))
1156 = (float_truncate:SF foo:DF).
1158 (float_truncate:DF (float_extend:XF foo:SF))
 1159 = (float_extend:DF foo:SF). */
1160 if ((GET_CODE (op) == FLOAT_TRUNCATE
1161 && flag_unsafe_math_optimizations)
1162 || GET_CODE (op) == FLOAT_EXTEND)
1163 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1164 0)))
1165 > GET_MODE_SIZE (mode)
1166 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1167 mode,
1168 XEXP (op, 0), mode);
1170 /* (float_truncate (float x)) is (float x) */
1171 if (GET_CODE (op) == FLOAT
1172 && (flag_unsafe_math_optimizations
1173 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1174 && ((unsigned)significand_size (GET_MODE (op))
1175 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1176 - num_sign_bit_copies (XEXP (op, 0),
1177 GET_MODE (XEXP (op, 0))))))))
1178 return simplify_gen_unary (FLOAT, mode,
1179 XEXP (op, 0),
1180 GET_MODE (XEXP (op, 0)));
1182 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1183 (OP:SF foo:SF) if OP is NEG or ABS. */
1184 if ((GET_CODE (op) == ABS
1185 || GET_CODE (op) == NEG)
1186 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1187 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1188 return simplify_gen_unary (GET_CODE (op), mode,
1189 XEXP (XEXP (op, 0), 0), mode);
1191 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1192 is (float_truncate:SF x). */
1193 if (GET_CODE (op) == SUBREG
1194 && subreg_lowpart_p (op)
1195 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1196 return SUBREG_REG (op);
1197 break;
1199 case FLOAT_EXTEND:
1200 if (DECIMAL_FLOAT_MODE_P (mode))
1201 break;
1203 /* (float_extend (float_extend x)) is (float_extend x)
1205 (float_extend (float x)) is (float x) assuming that double
 1206 rounding can't happen. */
1208 if (GET_CODE (op) == FLOAT_EXTEND
1209 || (GET_CODE (op) == FLOAT
1210 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1211 && ((unsigned)significand_size (GET_MODE (op))
1212 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1213 - num_sign_bit_copies (XEXP (op, 0),
1214 GET_MODE (XEXP (op, 0)))))))
1215 return simplify_gen_unary (GET_CODE (op), mode,
1216 XEXP (op, 0),
1217 GET_MODE (XEXP (op, 0)));
1219 break;
1221 case ABS:
1222 /* (abs (neg <foo>)) -> (abs <foo>) */
1223 if (GET_CODE (op) == NEG)
1224 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1225 GET_MODE (XEXP (op, 0)));
1227 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1228 do nothing. */
1229 if (GET_MODE (op) == VOIDmode)
1230 break;
1232 /* If operand is something known to be positive, ignore the ABS. */
1233 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1234 || val_signbit_known_clear_p (GET_MODE (op),
1235 nonzero_bits (op, GET_MODE (op))))
1236 return op;
1238 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1239 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1240 return gen_rtx_NEG (mode, op);
1242 break;
1244 case FFS:
1245 /* (ffs (*_extend <X>)) = (ffs <X>) */
1246 if (GET_CODE (op) == SIGN_EXTEND
1247 || GET_CODE (op) == ZERO_EXTEND)
1248 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1249 GET_MODE (XEXP (op, 0)));
1250 break;
1252 case POPCOUNT:
1253 switch (GET_CODE (op))
1255 case BSWAP:
1256 case ZERO_EXTEND:
1257 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1258 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1259 GET_MODE (XEXP (op, 0)));
1261 case ROTATE:
1262 case ROTATERT:
1263 /* Rotations don't affect popcount. */
1264 if (!side_effects_p (XEXP (op, 1)))
1265 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1266 GET_MODE (XEXP (op, 0)));
1267 break;
1269 default:
1270 break;
1272 break;
1274 case PARITY:
1275 switch (GET_CODE (op))
1277 case NOT:
1278 case BSWAP:
1279 case ZERO_EXTEND:
1280 case SIGN_EXTEND:
1281 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1282 GET_MODE (XEXP (op, 0)));
1284 case ROTATE:
1285 case ROTATERT:
1286 /* Rotations don't affect parity. */
1287 if (!side_effects_p (XEXP (op, 1)))
1288 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1290 break;
1292 default:
1293 break;
1295 break;
1297 case BSWAP:
1298 /* (bswap (bswap x)) -> x. */
1299 if (GET_CODE (op) == BSWAP)
1300 return XEXP (op, 0);
1301 break;
1303 case FLOAT:
1304 /* (float (sign_extend <X>)) = (float <X>). */
1305 if (GET_CODE (op) == SIGN_EXTEND)
1306 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1307 GET_MODE (XEXP (op, 0)));
1308 break;
1310 case SIGN_EXTEND:
1311 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1312 becomes just the MINUS if its mode is MODE. This allows
1313 folding switch statements on machines using casesi (such as
1314 the VAX). */
1315 if (GET_CODE (op) == TRUNCATE
1316 && GET_MODE (XEXP (op, 0)) == mode
1317 && GET_CODE (XEXP (op, 0)) == MINUS
1318 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1319 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1320 return XEXP (op, 0);
1322 /* Extending a widening multiplication should be canonicalized to
1323 a wider widening multiplication. */
1324 if (GET_CODE (op) == MULT)
1326 rtx lhs = XEXP (op, 0);
1327 rtx rhs = XEXP (op, 1);
1328 enum rtx_code lcode = GET_CODE (lhs);
1329 enum rtx_code rcode = GET_CODE (rhs);
1331 /* Widening multiplies usually extend both operands, but sometimes
1332 they use a shift to extract a portion of a register. */
1333 if ((lcode == SIGN_EXTEND
1334 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1335 && (rcode == SIGN_EXTEND
1336 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1338 machine_mode lmode = GET_MODE (lhs);
1339 machine_mode rmode = GET_MODE (rhs);
1340 int bits;
1342 if (lcode == ASHIFTRT)
1343 /* Number of bits not shifted off the end. */
1344 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1345 else /* lcode == SIGN_EXTEND */
1346 /* Size of inner mode. */
1347 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1349 if (rcode == ASHIFTRT)
1350 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1351 else /* rcode == SIGN_EXTEND */
1352 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1354 /* We can only widen multiplies if the result is mathematically
1355 equivalent. I.e. if overflow was impossible. */
1356 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1357 return simplify_gen_binary
1358 (MULT, mode,
1359 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1360 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1364 /* Check for a sign extension of a subreg of a promoted
1365 variable, where the promotion is sign-extended, and the
1366 target mode is the same as the variable's promotion. */
1367 if (GET_CODE (op) == SUBREG
1368 && SUBREG_PROMOTED_VAR_P (op)
1369 && SUBREG_PROMOTED_SIGNED_P (op)
1370 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1372 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1373 if (temp)
1374 return temp;
1377 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1378 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1379 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1381 gcc_assert (GET_MODE_PRECISION (mode)
1382 > GET_MODE_PRECISION (GET_MODE (op)));
1383 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1384 GET_MODE (XEXP (op, 0)));
1387 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1388 is (sign_extend:M (subreg:O <X>)) if there is mode with
1389 GET_MODE_BITSIZE (N) - I bits.
1390 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1391 is similarly (zero_extend:M (subreg:O <X>)). */
1392 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1393 && GET_CODE (XEXP (op, 0)) == ASHIFT
1394 && CONST_INT_P (XEXP (op, 1))
1395 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1396 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1398 machine_mode tmode
1399 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1400 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1401 gcc_assert (GET_MODE_BITSIZE (mode)
1402 > GET_MODE_BITSIZE (GET_MODE (op)));
1403 if (tmode != BLKmode)
1405 rtx inner =
1406 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1407 if (inner)
1408 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1409 ? SIGN_EXTEND : ZERO_EXTEND,
1410 mode, inner, tmode);
1414 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1415 /* As we do not know which address space the pointer is referring to,
1416 we can do this only if the target does not support different pointer
1417 or address modes depending on the address space. */
1418 if (target_default_pointer_address_modes_p ()
1419 && ! POINTERS_EXTEND_UNSIGNED
1420 && mode == Pmode && GET_MODE (op) == ptr_mode
1421 && (CONSTANT_P (op)
1422 || (GET_CODE (op) == SUBREG
1423 && REG_P (SUBREG_REG (op))
1424 && REG_POINTER (SUBREG_REG (op))
1425 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1426 return convert_memory_address (Pmode, op);
1427 #endif
1428 break;
1430 case ZERO_EXTEND:
1431 /* Check for a zero extension of a subreg of a promoted
1432 variable, where the promotion is zero-extended, and the
1433 target mode is the same as the variable's promotion. */
1434 if (GET_CODE (op) == SUBREG
1435 && SUBREG_PROMOTED_VAR_P (op)
1436 && SUBREG_PROMOTED_UNSIGNED_P (op)
1437 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1439 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1440 if (temp)
1441 return temp;
1444 /* Extending a widening multiplication should be canonicalized to
1445 a wider widening multiplication. */
1446 if (GET_CODE (op) == MULT)
1448 rtx lhs = XEXP (op, 0);
1449 rtx rhs = XEXP (op, 1);
1450 enum rtx_code lcode = GET_CODE (lhs);
1451 enum rtx_code rcode = GET_CODE (rhs);
1453 /* Widening multiplies usually extend both operands, but sometimes
1454 they use a shift to extract a portion of a register. */
1455 if ((lcode == ZERO_EXTEND
1456 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1457 && (rcode == ZERO_EXTEND
1458 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1460 machine_mode lmode = GET_MODE (lhs);
1461 machine_mode rmode = GET_MODE (rhs);
1462 int bits;
1464 if (lcode == LSHIFTRT)
1465 /* Number of bits not shifted off the end. */
1466 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1467 else /* lcode == ZERO_EXTEND */
1468 /* Size of inner mode. */
1469 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1471 if (rcode == LSHIFTRT)
1472 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1473 else /* rcode == ZERO_EXTEND */
1474 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1476 /* We can only widen multiplies if the result is mathematically
1477 equivalent. I.e. if overflow was impossible. */
1478 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1479 return simplify_gen_binary
1480 (MULT, mode,
1481 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1482 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1486 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1487 if (GET_CODE (op) == ZERO_EXTEND)
1488 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1489 GET_MODE (XEXP (op, 0)));
1491 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1492 is (zero_extend:M (subreg:O <X>)) if there is mode with
1493 GET_MODE_PRECISION (N) - I bits. */
1494 if (GET_CODE (op) == LSHIFTRT
1495 && GET_CODE (XEXP (op, 0)) == ASHIFT
1496 && CONST_INT_P (XEXP (op, 1))
1497 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1498 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1500 machine_mode tmode
1501 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1502 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1503 if (tmode != BLKmode)
1505 rtx inner =
1506 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1507 if (inner)
1508 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1512 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1513 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1514 of mode N. E.g.
1515 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1516 (and:SI (reg:SI) (const_int 63)). */
1517 if (GET_CODE (op) == SUBREG
1518 && GET_MODE_PRECISION (GET_MODE (op))
1519 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1520 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1521 <= HOST_BITS_PER_WIDE_INT
1522 && GET_MODE_PRECISION (mode)
1523 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1524 && subreg_lowpart_p (op)
1525 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1526 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1528 if (GET_MODE_PRECISION (mode)
1529 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1530 return SUBREG_REG (op);
1531 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1532 GET_MODE (SUBREG_REG (op)));
1535 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1536 /* As we do not know which address space the pointer is referring to,
1537 we can do this only if the target does not support different pointer
1538 or address modes depending on the address space. */
1539 if (target_default_pointer_address_modes_p ()
1540 && POINTERS_EXTEND_UNSIGNED > 0
1541 && mode == Pmode && GET_MODE (op) == ptr_mode
1542 && (CONSTANT_P (op)
1543 || (GET_CODE (op) == SUBREG
1544 && REG_P (SUBREG_REG (op))
1545 && REG_POINTER (SUBREG_REG (op))
1546 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1547 return convert_memory_address (Pmode, op);
1548 #endif
1549 break;
1551 default:
1552 break;
1555 return 0;
1558 /* Try to compute the value of a unary operation CODE whose output mode is to
1559 be MODE with input operand OP whose mode was originally OP_MODE.
1560 Return zero if the value cannot be computed. */
1562 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1563 rtx op, machine_mode op_mode)
1565 unsigned int width = GET_MODE_PRECISION (mode);
1567 if (code == VEC_DUPLICATE)
1569 gcc_assert (VECTOR_MODE_P (mode));
1570 if (GET_MODE (op) != VOIDmode)
1572 if (!VECTOR_MODE_P (GET_MODE (op)))
1573 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1574 else
1575 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1576 (GET_MODE (op)));
1578 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1579 || GET_CODE (op) == CONST_VECTOR)
1581 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1582 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1583 rtvec v = rtvec_alloc (n_elts);
1584 unsigned int i;
1586 if (GET_CODE (op) != CONST_VECTOR)
1587 for (i = 0; i < n_elts; i++)
1588 RTVEC_ELT (v, i) = op;
1589 else
1591 machine_mode inmode = GET_MODE (op);
1592 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1593 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1595 gcc_assert (in_n_elts < n_elts);
1596 gcc_assert ((n_elts % in_n_elts) == 0);
1597 for (i = 0; i < n_elts; i++)
1598 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1600 return gen_rtx_CONST_VECTOR (mode, v);
1604 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1606 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1607 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1608 machine_mode opmode = GET_MODE (op);
1609 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1610 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1611 rtvec v = rtvec_alloc (n_elts);
1612 unsigned int i;
1614 gcc_assert (op_n_elts == n_elts);
1615 for (i = 0; i < n_elts; i++)
1617 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1618 CONST_VECTOR_ELT (op, i),
1619 GET_MODE_INNER (opmode));
1620 if (!x)
1621 return 0;
1622 RTVEC_ELT (v, i) = x;
1624 return gen_rtx_CONST_VECTOR (mode, v);
1627 /* The order of these tests is critical so that, for example, we don't
1628 check the wrong mode (input vs. output) for a conversion operation,
1629 such as FIX. At some point, this should be simplified. */
1631 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1633 REAL_VALUE_TYPE d;
1635 if (op_mode == VOIDmode)
 1637 /* CONST_INTs have VOIDmode as the mode. We assume that all
 1638 the bits of the constant are significant, though this is
 1639 a dangerous assumption, as CONST_INTs are many times
1640 created and used with garbage in the bits outside of the
1641 precision of the implied mode of the const_int. */
1642 op_mode = MAX_MODE_INT;
1645 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1646 d = real_value_truncate (mode, d);
1647 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1649 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1651 REAL_VALUE_TYPE d;
1653 if (op_mode == VOIDmode)
 1655 /* CONST_INTs have VOIDmode as the mode. We assume that all
 1656 the bits of the constant are significant, though this is
 1657 a dangerous assumption, as CONST_INTs are many times
1658 created and used with garbage in the bits outside of the
1659 precision of the implied mode of the const_int. */
1660 op_mode = MAX_MODE_INT;
1663 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1664 d = real_value_truncate (mode, d);
1665 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1668 if (CONST_SCALAR_INT_P (op) && width > 0)
1670 wide_int result;
1671 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1672 rtx_mode_t op0 = std::make_pair (op, imode);
1673 int int_value;
1675 #if TARGET_SUPPORTS_WIDE_INT == 0
1676 /* This assert keeps the simplification from producing a result
1677 that cannot be represented in a CONST_DOUBLE but a lot of
1678 upstream callers expect that this function never fails to
 1679 simplify something, and so if you added this to the test
 1680 above, the code would die later anyway. If this assert
1681 happens, you just need to make the port support wide int. */
1682 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1683 #endif
1685 switch (code)
1687 case NOT:
1688 result = wi::bit_not (op0);
1689 break;
1691 case NEG:
1692 result = wi::neg (op0);
1693 break;
1695 case ABS:
1696 result = wi::abs (op0);
1697 break;
1699 case FFS:
1700 result = wi::shwi (wi::ffs (op0), mode);
1701 break;
1703 case CLZ:
1704 if (wi::ne_p (op0, 0))
1705 int_value = wi::clz (op0);
1706 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1707 int_value = GET_MODE_PRECISION (mode);
1708 result = wi::shwi (int_value, mode);
1709 break;
1711 case CLRSB:
1712 result = wi::shwi (wi::clrsb (op0), mode);
1713 break;
1715 case CTZ:
1716 if (wi::ne_p (op0, 0))
1717 int_value = wi::ctz (op0);
1718 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1719 int_value = GET_MODE_PRECISION (mode);
1720 result = wi::shwi (int_value, mode);
1721 break;
1723 case POPCOUNT:
1724 result = wi::shwi (wi::popcount (op0), mode);
1725 break;
1727 case PARITY:
1728 result = wi::shwi (wi::parity (op0), mode);
1729 break;
1731 case BSWAP:
1732 result = wide_int (op0).bswap ();
1733 break;
1735 case TRUNCATE:
1736 case ZERO_EXTEND:
1737 result = wide_int::from (op0, width, UNSIGNED);
1738 break;
1740 case SIGN_EXTEND:
1741 result = wide_int::from (op0, width, SIGNED);
1742 break;
1744 case SQRT:
1745 default:
1746 return 0;
1749 return immed_wide_int_const (result, mode);
1752 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1753 && SCALAR_FLOAT_MODE_P (mode)
1754 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1756 REAL_VALUE_TYPE d;
1757 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1759 switch (code)
1761 case SQRT:
1762 return 0;
1763 case ABS:
1764 d = real_value_abs (&d);
1765 break;
1766 case NEG:
1767 d = real_value_negate (&d);
1768 break;
1769 case FLOAT_TRUNCATE:
1770 d = real_value_truncate (mode, d);
1771 break;
1772 case FLOAT_EXTEND:
1773 /* All this does is change the mode, unless changing
1774 mode class. */
1775 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1776 real_convert (&d, mode, &d);
1777 break;
1778 case FIX:
1779 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1780 break;
1781 case NOT:
1783 long tmp[4];
1784 int i;
1786 real_to_target (tmp, &d, GET_MODE (op));
1787 for (i = 0; i < 4; i++)
1788 tmp[i] = ~tmp[i];
1789 real_from_target (&d, tmp, mode);
1790 break;
1792 default:
1793 gcc_unreachable ();
1795 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1797 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1798 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1799 && GET_MODE_CLASS (mode) == MODE_INT
1800 && width > 0)
1802 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1803 operators are intentionally left unspecified (to ease implementation
1804 by target backends), for consistency, this routine implements the
1805 same semantics for constant folding as used by the middle-end. */
1807 /* This was formerly used only for non-IEEE float.
1808 eggert@twinsun.com says it is safe for IEEE also. */
1809 REAL_VALUE_TYPE x, t;
1810 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1811 wide_int wmax, wmin;
 1812 /* This is part of the ABI of real_to_integer, but we check
1813 things before making this call. */
1814 bool fail;
1816 switch (code)
1818 case FIX:
1819 if (REAL_VALUE_ISNAN (x))
1820 return const0_rtx;
1822 /* Test against the signed upper bound. */
1823 wmax = wi::max_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1825 if (REAL_VALUES_LESS (t, x))
1826 return immed_wide_int_const (wmax, mode);
1828 /* Test against the signed lower bound. */
1829 wmin = wi::min_value (width, SIGNED);
1830 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1831 if (REAL_VALUES_LESS (x, t))
1832 return immed_wide_int_const (wmin, mode);
1834 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1835 break;
1837 case UNSIGNED_FIX:
1838 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1839 return const0_rtx;
1841 /* Test against the unsigned upper bound. */
1842 wmax = wi::max_value (width, UNSIGNED);
1843 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1844 if (REAL_VALUES_LESS (t, x))
1845 return immed_wide_int_const (wmax, mode);
1847 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1848 mode);
1849 break;
1851 default:
1852 gcc_unreachable ();
1856 return NULL_RTX;
1859 /* Subroutine of simplify_binary_operation to simplify a binary operation
1860 CODE that can commute with byte swapping, with result mode MODE and
1861 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1862 Return zero if no simplification or canonicalization is possible. */
1864 static rtx
1865 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1866 rtx op0, rtx op1)
1868 rtx tem;
1870 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1871 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1873 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1874 simplify_gen_unary (BSWAP, mode, op1, mode));
1875 return simplify_gen_unary (BSWAP, mode, tem, mode);
1878 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1879 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1881 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1882 return simplify_gen_unary (BSWAP, mode, tem, mode);
1885 return NULL_RTX;
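/* Illustrative aside (not part of the original source): the rewrite above
   relies on byte swapping distributing over the bitwise operators,
   i.e. bswap(a) OP bswap(b) == bswap(a OP b) for OP in {&, |, ^}, so
   (op (bswap x) C) == (bswap (op x (bswap C))).  A minimal check in plain
   C, assuming a 32-bit example and GCC's __builtin_bswap32:

     uint32_t x = 0x12345678u, c = 0x000000ffu;
     assert ((__builtin_bswap32 (x) & c)
             == __builtin_bswap32 (x & __builtin_bswap32 (c)));
*/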
1888 /* Subroutine of simplify_binary_operation to simplify a commutative,
1889 associative binary operation CODE with result mode MODE, operating
1890 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1891 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1892 canonicalization is possible. */
1894 static rtx
1895 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1896 rtx op0, rtx op1)
1898 rtx tem;
1900 /* Linearize the operator to the left. */
1901 if (GET_CODE (op1) == code)
1903 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1904 if (GET_CODE (op0) == code)
1906 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1907 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1910 /* "a op (b op c)" becomes "(b op c) op a". */
1911 if (! swap_commutative_operands_p (op1, op0))
1912 return simplify_gen_binary (code, mode, op1, op0);
1914 tem = op0;
1915 op0 = op1;
1916 op1 = tem;
1919 if (GET_CODE (op0) == code)
1921 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1922 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1924 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1925 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1928 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1929 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1930 if (tem != 0)
1931 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1933 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1934 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1935 if (tem != 0)
1936 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1939 return 0;
1943 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1944 and OP1. Return 0 if no simplification is possible.
1946 Don't use this for relational operations such as EQ or LT.
1947 Use simplify_relational_operation instead. */
1948 rtx
1949 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1950 rtx op0, rtx op1)
1952 rtx trueop0, trueop1;
1953 rtx tem;
1955 /* Relational operations don't work here. We must know the mode
1956 of the operands in order to do the comparison correctly.
1957 Assuming a full word can give incorrect results.
1958 Consider comparing 128 with -128 in QImode. */
1959 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1960 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
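/* Illustrative aside (not part of the original source): the 8-bit pattern
   0x80 stands for 128 when treated as unsigned but for -128 when treated
   as signed, so without knowing the operands' mode a comparison cannot be
   folded.  On a two's-complement target, assuming <stdint.h> types:

     assert ((uint8_t) 0x80 == 128);
     assert ((int8_t) 0x80 == -128);
*/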
1962 /* Make sure the constant is second. */
1963 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1964 && swap_commutative_operands_p (op0, op1))
1966 tem = op0, op0 = op1, op1 = tem;
1969 trueop0 = avoid_constant_pool_reference (op0);
1970 trueop1 = avoid_constant_pool_reference (op1);
1972 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1973 if (tem)
1974 return tem;
1975 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1978 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1979 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1980 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1981 actual constants. */
1983 static rtx
1984 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1985 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1987 rtx tem, reversed, opleft, opright;
1988 HOST_WIDE_INT val;
1989 unsigned int width = GET_MODE_PRECISION (mode);
1991 /* Even if we can't compute a constant result,
1992 there are some cases worth simplifying. */
1994 switch (code)
1996 case PLUS:
1997 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1998 when x is NaN, infinite, or finite and nonzero. They aren't
1999 when x is -0 and the rounding mode is not towards -infinity,
2000 since (-0) + 0 is then 0. */
2001 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2002 return op0;
2004 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2005 transformations are safe even for IEEE. */
2006 if (GET_CODE (op0) == NEG)
2007 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2008 else if (GET_CODE (op1) == NEG)
2009 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2011 /* (~a) + 1 -> -a */
2012 if (INTEGRAL_MODE_P (mode)
2013 && GET_CODE (op0) == NOT
2014 && trueop1 == const1_rtx)
2015 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
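/* Illustrative aside (not part of the original source): (~a) + 1 == -a is
   just two's-complement negation, exact in the modular arithmetic of an
   unsigned type:

     unsigned a = 12345u;
     assert (~a + 1u == 0u - a);
*/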
2017 /* Handle both-operands-constant cases. We can only add
2018 CONST_INTs to constants since the sum of relocatable symbols
2019 can't be handled by most assemblers. Don't add CONST_INT
2020 to CONST_INT since overflow won't be computed properly if wider
2021 than HOST_BITS_PER_WIDE_INT. */
2023 if ((GET_CODE (op0) == CONST
2024 || GET_CODE (op0) == SYMBOL_REF
2025 || GET_CODE (op0) == LABEL_REF)
2026 && CONST_INT_P (op1))
2027 return plus_constant (mode, op0, INTVAL (op1));
2028 else if ((GET_CODE (op1) == CONST
2029 || GET_CODE (op1) == SYMBOL_REF
2030 || GET_CODE (op1) == LABEL_REF)
2031 && CONST_INT_P (op0))
2032 return plus_constant (mode, op1, INTVAL (op0));
2034 /* See if this is something like X * C - X or vice versa or
2035 if the multiplication is written as a shift. If so, we can
2036 distribute and make a new multiply, shift, or maybe just
2037 have X (if C is 2 in the example above). But don't make
2038 something more expensive than we had before. */
2040 if (SCALAR_INT_MODE_P (mode))
2042 rtx lhs = op0, rhs = op1;
2044 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2045 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2047 if (GET_CODE (lhs) == NEG)
2049 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2050 lhs = XEXP (lhs, 0);
2052 else if (GET_CODE (lhs) == MULT
2053 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2055 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2056 lhs = XEXP (lhs, 0);
2058 else if (GET_CODE (lhs) == ASHIFT
2059 && CONST_INT_P (XEXP (lhs, 1))
2060 && INTVAL (XEXP (lhs, 1)) >= 0
2061 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2063 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2064 GET_MODE_PRECISION (mode));
2065 lhs = XEXP (lhs, 0);
2068 if (GET_CODE (rhs) == NEG)
2070 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2071 rhs = XEXP (rhs, 0);
2073 else if (GET_CODE (rhs) == MULT
2074 && CONST_INT_P (XEXP (rhs, 1)))
2076 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2077 rhs = XEXP (rhs, 0);
2079 else if (GET_CODE (rhs) == ASHIFT
2080 && CONST_INT_P (XEXP (rhs, 1))
2081 && INTVAL (XEXP (rhs, 1)) >= 0
2082 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2084 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2085 GET_MODE_PRECISION (mode));
2086 rhs = XEXP (rhs, 0);
2089 if (rtx_equal_p (lhs, rhs))
2091 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2092 rtx coeff;
2093 bool speed = optimize_function_for_speed_p (cfun);
2095 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2097 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2098 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2099 ? tem : 0;
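/* Illustrative aside (not part of the original source): the coefficient
   folding above distributes over a common operand, e.g. x*3 + x becomes
   x*4 and (x << 2) + x becomes x*5, which holds exactly in the modular
   arithmetic of a fixed-width integer mode:

     unsigned x = 7u;
     assert (x * 3u + x == x * 4u);
     assert (((x << 2) + x) == x * 5u);
*/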
2103 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2104 if (CONST_SCALAR_INT_P (op1)
2105 && GET_CODE (op0) == XOR
2106 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2107 && mode_signbit_p (mode, op1))
2108 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2109 simplify_gen_binary (XOR, mode, op1,
2110 XEXP (op0, 1)));
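/* Illustrative aside (not part of the original source): adding the sign
   bit in a fixed-width mode only flips the top bit (the carry out is
   discarded), so it is the same as XORing it.  An 8-bit example, assuming
   <stdint.h> types:

     uint8_t x = 0x5a, c1 = 0x12;
     assert ((uint8_t) ((uint8_t) (x ^ c1) + 0x80)
             == (uint8_t) (x ^ (c1 ^ 0x80)));
*/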
2112 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2113 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2114 && GET_CODE (op0) == MULT
2115 && GET_CODE (XEXP (op0, 0)) == NEG)
2117 rtx in1, in2;
2119 in1 = XEXP (XEXP (op0, 0), 0);
2120 in2 = XEXP (op0, 1);
2121 return simplify_gen_binary (MINUS, mode, op1,
2122 simplify_gen_binary (MULT, mode,
2123 in1, in2));
2126 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2127 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2128 is 1. */
2129 if (COMPARISON_P (op0)
2130 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2131 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2132 && (reversed = reversed_comparison (op0, mode)))
2133 return
2134 simplify_gen_unary (NEG, mode, reversed, mode);
2136 /* If one of the operands is a PLUS or a MINUS, see if we can
2137 simplify this by the associative law.
2138 Don't use the associative law for floating point.
2139 The inaccuracy makes it nonassociative,
2140 and subtle programs can break if operations are associated. */
2142 if (INTEGRAL_MODE_P (mode)
2143 && (plus_minus_operand_p (op0)
2144 || plus_minus_operand_p (op1))
2145 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2146 return tem;
2148 /* Reassociate floating point addition only when the user
2149 specifies associative math operations. */
2150 if (FLOAT_MODE_P (mode)
2151 && flag_associative_math)
2153 tem = simplify_associative_operation (code, mode, op0, op1);
2154 if (tem)
2155 return tem;
2157 break;
2159 case COMPARE:
2160 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2161 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2162 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2163 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2165 rtx xop00 = XEXP (op0, 0);
2166 rtx xop10 = XEXP (op1, 0);
2168 #ifdef HAVE_cc0
2169 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2170 #else
2171 if (REG_P (xop00) && REG_P (xop10)
2172 && GET_MODE (xop00) == GET_MODE (xop10)
2173 && REGNO (xop00) == REGNO (xop10)
2174 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2175 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2176 #endif
2177 return xop00;
2179 break;
2181 case MINUS:
2182 /* We can't assume x-x is 0 even with non-IEEE floating point,
2183 but since it is zero except in very strange circumstances, we
2184 will treat it as zero with -ffinite-math-only. */
2185 if (rtx_equal_p (trueop0, trueop1)
2186 && ! side_effects_p (op0)
2187 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2188 return CONST0_RTX (mode);
2190 /* Change subtraction from zero into negation. (0 - x) is the
2191 same as -x when x is NaN, infinite, or finite and nonzero.
2192 But if the mode has signed zeros, and does not round towards
2193 -infinity, then 0 - 0 is 0, not -0. */
2194 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2195 return simplify_gen_unary (NEG, mode, op1, mode);
2197 /* (-1 - a) is ~a. */
2198 if (trueop0 == constm1_rtx)
2199 return simplify_gen_unary (NOT, mode, op1, mode);
2201 /* Subtracting 0 has no effect unless the mode has signed zeros
2202 and supports rounding towards -infinity. In such a case,
2203 0 - 0 is -0. */
2204 if (!(HONOR_SIGNED_ZEROS (mode)
2205 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2206 && trueop1 == CONST0_RTX (mode))
2207 return op0;
2209 /* See if this is something like X * C - X or vice versa or
2210 if the multiplication is written as a shift. If so, we can
2211 distribute and make a new multiply, shift, or maybe just
2212 have X (if C is 2 in the example above). But don't make
2213 something more expensive than we had before. */
2215 if (SCALAR_INT_MODE_P (mode))
2217 rtx lhs = op0, rhs = op1;
2219 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2220 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2222 if (GET_CODE (lhs) == NEG)
2224 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2225 lhs = XEXP (lhs, 0);
2227 else if (GET_CODE (lhs) == MULT
2228 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2230 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2231 lhs = XEXP (lhs, 0);
2233 else if (GET_CODE (lhs) == ASHIFT
2234 && CONST_INT_P (XEXP (lhs, 1))
2235 && INTVAL (XEXP (lhs, 1)) >= 0
2236 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2238 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2239 GET_MODE_PRECISION (mode));
2240 lhs = XEXP (lhs, 0);
2243 if (GET_CODE (rhs) == NEG)
2245 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2246 rhs = XEXP (rhs, 0);
2248 else if (GET_CODE (rhs) == MULT
2249 && CONST_INT_P (XEXP (rhs, 1)))
2251 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2252 rhs = XEXP (rhs, 0);
2254 else if (GET_CODE (rhs) == ASHIFT
2255 && CONST_INT_P (XEXP (rhs, 1))
2256 && INTVAL (XEXP (rhs, 1)) >= 0
2257 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2259 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2260 GET_MODE_PRECISION (mode));
2261 negcoeff1 = -negcoeff1;
2262 rhs = XEXP (rhs, 0);
2265 if (rtx_equal_p (lhs, rhs))
2267 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2268 rtx coeff;
2269 bool speed = optimize_function_for_speed_p (cfun);
2271 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2273 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2274 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2275 ? tem : 0;
2279 /* (a - (-b)) -> (a + b). True even for IEEE. */
2280 if (GET_CODE (op1) == NEG)
2281 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2283 /* (-x - c) may be simplified as (-c - x). */
2284 if (GET_CODE (op0) == NEG
2285 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2287 tem = simplify_unary_operation (NEG, mode, op1, mode);
2288 if (tem)
2289 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2292 /* Don't let a relocatable value get a negative coeff. */
2293 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2294 return simplify_gen_binary (PLUS, mode,
2295 op0,
2296 neg_const_int (mode, op1));
2298 /* (x - (x & y)) -> (x & ~y) */
2299 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2301 if (rtx_equal_p (op0, XEXP (op1, 0)))
2303 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2304 GET_MODE (XEXP (op1, 1)));
2305 return simplify_gen_binary (AND, mode, op0, tem);
2307 if (rtx_equal_p (op0, XEXP (op1, 1)))
2309 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2310 GET_MODE (XEXP (op1, 0)));
2311 return simplify_gen_binary (AND, mode, op0, tem);
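/* Illustrative aside (not part of the original source): x & y only has
   bits that are already set in x, so the subtraction never borrows and
   simply clears those bits, which is exactly x & ~y:

     unsigned x = 0xf0f0u, y = 0x1234u;
     assert (x - (x & y) == (x & ~y));
*/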
2315 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2316 by reversing the comparison code if valid. */
2317 if (STORE_FLAG_VALUE == 1
2318 && trueop0 == const1_rtx
2319 && COMPARISON_P (op1)
2320 && (reversed = reversed_comparison (op1, mode)))
2321 return reversed;
2323 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2324 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2325 && GET_CODE (op1) == MULT
2326 && GET_CODE (XEXP (op1, 0)) == NEG)
2328 rtx in1, in2;
2330 in1 = XEXP (XEXP (op1, 0), 0);
2331 in2 = XEXP (op1, 1);
2332 return simplify_gen_binary (PLUS, mode,
2333 simplify_gen_binary (MULT, mode,
2334 in1, in2),
2335 op0);
2338 /* Canonicalize (minus (neg A) (mult B C)) to
2339 (minus (mult (neg B) C) A). */
2340 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2341 && GET_CODE (op1) == MULT
2342 && GET_CODE (op0) == NEG)
2344 rtx in1, in2;
2346 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2347 in2 = XEXP (op1, 1);
2348 return simplify_gen_binary (MINUS, mode,
2349 simplify_gen_binary (MULT, mode,
2350 in1, in2),
2351 XEXP (op0, 0));
2354 /* If one of the operands is a PLUS or a MINUS, see if we can
2355 simplify this by the associative law. This will, for example,
2356 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2357 Don't use the associative law for floating point.
2358 The inaccuracy makes it nonassociative,
2359 and subtle programs can break if operations are associated. */
2361 if (INTEGRAL_MODE_P (mode)
2362 && (plus_minus_operand_p (op0)
2363 || plus_minus_operand_p (op1))
2364 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2365 return tem;
2366 break;
2368 case MULT:
2369 if (trueop1 == constm1_rtx)
2370 return simplify_gen_unary (NEG, mode, op0, mode);
2372 if (GET_CODE (op0) == NEG)
2374 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2375 /* If op1 is a MULT as well and simplify_unary_operation
2376 just moved the NEG to the second operand, simplify_gen_binary
2377 below could, through simplify_associative_operation, move
2378 the NEG around again and recurse endlessly. */
2379 if (temp
2380 && GET_CODE (op1) == MULT
2381 && GET_CODE (temp) == MULT
2382 && XEXP (op1, 0) == XEXP (temp, 0)
2383 && GET_CODE (XEXP (temp, 1)) == NEG
2384 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2385 temp = NULL_RTX;
2386 if (temp)
2387 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2389 if (GET_CODE (op1) == NEG)
2391 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2392 /* If op0 is a MULT as well and simplify_unary_operation
2393 just moved the NEG to the second operand, simplify_gen_binary
2394 below could, through simplify_associative_operation, move
2395 the NEG around again and recurse endlessly. */
2396 if (temp
2397 && GET_CODE (op0) == MULT
2398 && GET_CODE (temp) == MULT
2399 && XEXP (op0, 0) == XEXP (temp, 0)
2400 && GET_CODE (XEXP (temp, 1)) == NEG
2401 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2402 temp = NULL_RTX;
2403 if (temp)
2404 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2407 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2408 x is NaN, since x * 0 is then also NaN. Nor is it valid
2409 when the mode has signed zeros, since multiplying a negative
2410 number by 0 will give -0, not 0. */
2411 if (!HONOR_NANS (mode)
2412 && !HONOR_SIGNED_ZEROS (mode)
2413 && trueop1 == CONST0_RTX (mode)
2414 && ! side_effects_p (op0))
2415 return op1;
2417 /* In IEEE floating point, x*1 is not equivalent to x for
2418 signalling NaNs. */
2419 if (!HONOR_SNANS (mode)
2420 && trueop1 == CONST1_RTX (mode))
2421 return op0;
2423 /* Convert multiply by constant power of two into shift. */
2424 if (CONST_SCALAR_INT_P (trueop1))
2426 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2427 if (val >= 0)
2428 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
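/* Illustrative aside (not part of the original source): multiplying by
   2**n and shifting left by n agree exactly in modular unsigned
   arithmetic:

     unsigned x = 0x01234567u;
     assert (x * 8u == x << 3);
*/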
2431 /* x*2 is x+x and x*(-1) is -x */
2432 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2433 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2434 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2435 && GET_MODE (op0) == mode)
2437 REAL_VALUE_TYPE d;
2438 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2440 if (REAL_VALUES_EQUAL (d, dconst2))
2441 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2443 if (!HONOR_SNANS (mode)
2444 && REAL_VALUES_EQUAL (d, dconstm1))
2445 return simplify_gen_unary (NEG, mode, op0, mode);
2448 /* Optimize -x * -x as x * x. */
2449 if (FLOAT_MODE_P (mode)
2450 && GET_CODE (op0) == NEG
2451 && GET_CODE (op1) == NEG
2452 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2453 && !side_effects_p (XEXP (op0, 0)))
2454 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2456 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2457 if (SCALAR_FLOAT_MODE_P (mode)
2458 && GET_CODE (op0) == ABS
2459 && GET_CODE (op1) == ABS
2460 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2461 && !side_effects_p (XEXP (op0, 0)))
2462 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2464 /* Reassociate multiplication, but for floating point MULTs
2465 only when the user specifies unsafe math optimizations. */
2466 if (! FLOAT_MODE_P (mode)
2467 || flag_unsafe_math_optimizations)
2469 tem = simplify_associative_operation (code, mode, op0, op1);
2470 if (tem)
2471 return tem;
2473 break;
2475 case IOR:
2476 if (trueop1 == CONST0_RTX (mode))
2477 return op0;
2478 if (INTEGRAL_MODE_P (mode)
2479 && trueop1 == CONSTM1_RTX (mode)
2480 && !side_effects_p (op0))
2481 return op1;
2482 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2483 return op0;
2484 /* A | (~A) -> -1 */
2485 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2486 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2487 && ! side_effects_p (op0)
2488 && SCALAR_INT_MODE_P (mode))
2489 return constm1_rtx;
2491 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2492 if (CONST_INT_P (op1)
2493 && HWI_COMPUTABLE_MODE_P (mode)
2494 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2495 && !side_effects_p (op0))
2496 return op1;
2498 /* Canonicalize (X & C1) | C2. */
2499 if (GET_CODE (op0) == AND
2500 && CONST_INT_P (trueop1)
2501 && CONST_INT_P (XEXP (op0, 1)))
2503 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2504 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2505 HOST_WIDE_INT c2 = INTVAL (trueop1);
2507 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2508 if ((c1 & c2) == c1
2509 && !side_effects_p (XEXP (op0, 0)))
2510 return trueop1;
2512 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2513 if (((c1|c2) & mask) == mask)
2514 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2516 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2517 if (((c1 & ~c2) & mask) != (c1 & mask))
2519 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2520 gen_int_mode (c1 & ~c2, mode));
2521 return simplify_gen_binary (IOR, mode, tem, op1);
2525 /* Convert (A & B) | A to A. */
2526 if (GET_CODE (op0) == AND
2527 && (rtx_equal_p (XEXP (op0, 0), op1)
2528 || rtx_equal_p (XEXP (op0, 1), op1))
2529 && ! side_effects_p (XEXP (op0, 0))
2530 && ! side_effects_p (XEXP (op0, 1)))
2531 return op1;
2533 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2534 mode size to (rotate A CX). */
2536 if (GET_CODE (op1) == ASHIFT
2537 || GET_CODE (op1) == SUBREG)
2539 opleft = op1;
2540 opright = op0;
2542 else
2544 opright = op1;
2545 opleft = op0;
2548 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2549 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2550 && CONST_INT_P (XEXP (opleft, 1))
2551 && CONST_INT_P (XEXP (opright, 1))
2552 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2553 == GET_MODE_PRECISION (mode)))
2554 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
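/* Illustrative aside (not part of the original source): when the two
   shift counts add up to the mode precision, the IOR of the shifts is a
   rotate.  For a 32-bit value rotated left by CX = 8 (so CY = 24):

     uint32_t a = 0x12345678u;
     assert (((a << 8) | (a >> 24)) == 0x34567812u);
*/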
2556 /* Same, but for ashift that has been "simplified" to a wider mode
2557 by simplify_shift_const. */
2559 if (GET_CODE (opleft) == SUBREG
2560 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2561 && GET_CODE (opright) == LSHIFTRT
2562 && GET_CODE (XEXP (opright, 0)) == SUBREG
2563 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2564 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2565 && (GET_MODE_SIZE (GET_MODE (opleft))
2566 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2567 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2568 SUBREG_REG (XEXP (opright, 0)))
2569 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2570 && CONST_INT_P (XEXP (opright, 1))
2571 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2572 == GET_MODE_PRECISION (mode)))
2573 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2574 XEXP (SUBREG_REG (opleft), 1));
2576 /* If we have (ior (and X C1) C2), simplify this by making
2577 C1 as small as possible if C1 actually changes. */
2578 if (CONST_INT_P (op1)
2579 && (HWI_COMPUTABLE_MODE_P (mode)
2580 || INTVAL (op1) > 0)
2581 && GET_CODE (op0) == AND
2582 && CONST_INT_P (XEXP (op0, 1))
2583 && CONST_INT_P (op1)
2584 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2586 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2587 gen_int_mode (UINTVAL (XEXP (op0, 1))
2588 & ~UINTVAL (op1),
2589 mode));
2590 return simplify_gen_binary (IOR, mode, tmp, op1);
2593 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2594 a (sign_extend (plus ...)). Check whether OP1 is a CONST_INT and
2595 the PLUS does not affect any of the bits in OP1; if so, we can do
2596 the IOR as a PLUS and we can associate. This is valid if OP1
2597 can be safely shifted left C bits. */
2598 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2599 && GET_CODE (XEXP (op0, 0)) == PLUS
2600 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2601 && CONST_INT_P (XEXP (op0, 1))
2602 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2604 int count = INTVAL (XEXP (op0, 1));
2605 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2607 if (mask >> count == INTVAL (trueop1)
2608 && trunc_int_for_mode (mask, mode) == mask
2609 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2610 return simplify_gen_binary (ASHIFTRT, mode,
2611 plus_constant (mode, XEXP (op0, 0),
2612 mask),
2613 XEXP (op0, 1));
2616 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2617 if (tem)
2618 return tem;
2620 tem = simplify_associative_operation (code, mode, op0, op1);
2621 if (tem)
2622 return tem;
2623 break;
2625 case XOR:
2626 if (trueop1 == CONST0_RTX (mode))
2627 return op0;
2628 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2629 return simplify_gen_unary (NOT, mode, op0, mode);
2630 if (rtx_equal_p (trueop0, trueop1)
2631 && ! side_effects_p (op0)
2632 && GET_MODE_CLASS (mode) != MODE_CC)
2633 return CONST0_RTX (mode);
2635 /* Canonicalize XOR of the most significant bit to PLUS. */
2636 if (CONST_SCALAR_INT_P (op1)
2637 && mode_signbit_p (mode, op1))
2638 return simplify_gen_binary (PLUS, mode, op0, op1);
2639 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2640 if (CONST_SCALAR_INT_P (op1)
2641 && GET_CODE (op0) == PLUS
2642 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2643 && mode_signbit_p (mode, XEXP (op0, 1)))
2644 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2645 simplify_gen_binary (XOR, mode, op1,
2646 XEXP (op0, 1)));
2648 /* If we are XORing two things that have no bits in common,
2649 convert them into an IOR. This helps to detect rotation encoded
2650 using those methods and possibly other simplifications. */
2652 if (HWI_COMPUTABLE_MODE_P (mode)
2653 && (nonzero_bits (op0, mode)
2654 & nonzero_bits (op1, mode)) == 0)
2655 return (simplify_gen_binary (IOR, mode, op0, op1));
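/* Illustrative aside (not part of the original source): when no bit can
   be set in both operands, XOR and IOR agree, since the 1^1 = 0 case
   never arises:

     unsigned a = 0x0f00u, b = 0x00f0u;
     assert ((a & b) == 0 && (a ^ b) == (a | b));
*/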
2657 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2658 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2659 (NOT y). */
2661 int num_negated = 0;
2663 if (GET_CODE (op0) == NOT)
2664 num_negated++, op0 = XEXP (op0, 0);
2665 if (GET_CODE (op1) == NOT)
2666 num_negated++, op1 = XEXP (op1, 0);
2668 if (num_negated == 2)
2669 return simplify_gen_binary (XOR, mode, op0, op1);
2670 else if (num_negated == 1)
2671 return simplify_gen_unary (NOT, mode,
2672 simplify_gen_binary (XOR, mode, op0, op1),
2673 mode);
2676 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2677 correspond to a machine insn or result in further simplifications
2678 if B is a constant. */
2680 if (GET_CODE (op0) == AND
2681 && rtx_equal_p (XEXP (op0, 1), op1)
2682 && ! side_effects_p (op1))
2683 return simplify_gen_binary (AND, mode,
2684 simplify_gen_unary (NOT, mode,
2685 XEXP (op0, 0), mode),
2686 op1);
2688 else if (GET_CODE (op0) == AND
2689 && rtx_equal_p (XEXP (op0, 0), op1)
2690 && ! side_effects_p (op1))
2691 return simplify_gen_binary (AND, mode,
2692 simplify_gen_unary (NOT, mode,
2693 XEXP (op0, 1), mode),
2694 op1);
2696 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2697 we can transform like this:
2698 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2699 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2700 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2701 Attempt a few simplifications when B and C are both constants. */
2702 if (GET_CODE (op0) == AND
2703 && CONST_INT_P (op1)
2704 && CONST_INT_P (XEXP (op0, 1)))
2706 rtx a = XEXP (op0, 0);
2707 rtx b = XEXP (op0, 1);
2708 rtx c = op1;
2709 HOST_WIDE_INT bval = INTVAL (b);
2710 HOST_WIDE_INT cval = INTVAL (c);
2712 rtx na_c
2713 = simplify_binary_operation (AND, mode,
2714 simplify_gen_unary (NOT, mode, a, mode),
2715 c);
2716 if ((~cval & bval) == 0)
2718 /* Try to simplify ~A&C | ~B&C. */
2719 if (na_c != NULL_RTX)
2720 return simplify_gen_binary (IOR, mode, na_c,
2721 gen_int_mode (~bval & cval, mode));
2723 else
2725 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2726 if (na_c == const0_rtx)
2728 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2729 gen_int_mode (~cval & bval,
2730 mode));
2731 return simplify_gen_binary (IOR, mode, a_nc_b,
2732 gen_int_mode (~bval & cval,
2733 mode));
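/* Illustrative aside (not part of the original source): the identity
   derived in the comment above, (A&B)^C == ~A&C | ~B&C | A&(B&~C), can be
   spot-checked with concrete values:

     unsigned a = 0x3c, b = 0x0f, c = 0xff;
     assert (((a & b) ^ c)
             == ((~a & c) | (~b & c) | (a & b & ~c)));
*/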
2738 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2739 comparison if STORE_FLAG_VALUE is 1. */
2740 if (STORE_FLAG_VALUE == 1
2741 && trueop1 == const1_rtx
2742 && COMPARISON_P (op0)
2743 && (reversed = reversed_comparison (op0, mode)))
2744 return reversed;
2746 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2747 is (lt foo (const_int 0)), so we can perform the above
2748 simplification if STORE_FLAG_VALUE is 1. */
2750 if (STORE_FLAG_VALUE == 1
2751 && trueop1 == const1_rtx
2752 && GET_CODE (op0) == LSHIFTRT
2753 && CONST_INT_P (XEXP (op0, 1))
2754 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2755 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2757 /* (xor (comparison foo bar) (const_int sign-bit))
2758 when STORE_FLAG_VALUE is the sign bit. */
2759 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2760 && trueop1 == const_true_rtx
2761 && COMPARISON_P (op0)
2762 && (reversed = reversed_comparison (op0, mode)))
2763 return reversed;
2765 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2766 if (tem)
2767 return tem;
2769 tem = simplify_associative_operation (code, mode, op0, op1);
2770 if (tem)
2771 return tem;
2772 break;
2774 case AND:
2775 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2776 return trueop1;
2777 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2778 return op0;
2779 if (HWI_COMPUTABLE_MODE_P (mode))
2781 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2782 HOST_WIDE_INT nzop1;
2783 if (CONST_INT_P (trueop1))
2785 HOST_WIDE_INT val1 = INTVAL (trueop1);
2786 /* If we are turning off bits already known off in OP0, we need
2787 not do an AND. */
2788 if ((nzop0 & ~val1) == 0)
2789 return op0;
2791 nzop1 = nonzero_bits (trueop1, mode);
2792 /* If we are clearing all the nonzero bits, the result is zero. */
2793 if ((nzop1 & nzop0) == 0
2794 && !side_effects_p (op0) && !side_effects_p (op1))
2795 return CONST0_RTX (mode);
2797 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return op0;
2800 /* A & (~A) -> 0 */
2801 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2802 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2803 && ! side_effects_p (op0)
2804 && GET_MODE_CLASS (mode) != MODE_CC)
2805 return CONST0_RTX (mode);
2807 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2808 there are no nonzero bits of C outside of X's mode. */
2809 if ((GET_CODE (op0) == SIGN_EXTEND
2810 || GET_CODE (op0) == ZERO_EXTEND)
2811 && CONST_INT_P (trueop1)
2812 && HWI_COMPUTABLE_MODE_P (mode)
2813 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2814 & UINTVAL (trueop1)) == 0)
2816 machine_mode imode = GET_MODE (XEXP (op0, 0));
2817 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2818 gen_int_mode (INTVAL (trueop1),
2819 imode));
2820 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2823 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2824 we might be able to further simplify the AND with X and potentially
2825 remove the truncation altogether. */
2826 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2828 rtx x = XEXP (op0, 0);
2829 machine_mode xmode = GET_MODE (x);
2830 tem = simplify_gen_binary (AND, xmode, x,
2831 gen_int_mode (INTVAL (trueop1), xmode));
2832 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2835 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2836 if (GET_CODE (op0) == IOR
2837 && CONST_INT_P (trueop1)
2838 && CONST_INT_P (XEXP (op0, 1)))
2840 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2841 return simplify_gen_binary (IOR, mode,
2842 simplify_gen_binary (AND, mode,
2843 XEXP (op0, 0), op1),
2844 gen_int_mode (tmp, mode));
2847 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2848 insn (and may simplify more). */
2849 if (GET_CODE (op0) == XOR
2850 && rtx_equal_p (XEXP (op0, 0), op1)
2851 && ! side_effects_p (op1))
2852 return simplify_gen_binary (AND, mode,
2853 simplify_gen_unary (NOT, mode,
2854 XEXP (op0, 1), mode),
2855 op1);
2857 if (GET_CODE (op0) == XOR
2858 && rtx_equal_p (XEXP (op0, 1), op1)
2859 && ! side_effects_p (op1))
2860 return simplify_gen_binary (AND, mode,
2861 simplify_gen_unary (NOT, mode,
2862 XEXP (op0, 0), mode),
2863 op1);
2865 /* Similarly for (~(A ^ B)) & A. */
2866 if (GET_CODE (op0) == NOT
2867 && GET_CODE (XEXP (op0, 0)) == XOR
2868 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2869 && ! side_effects_p (op1))
2870 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2872 if (GET_CODE (op0) == NOT
2873 && GET_CODE (XEXP (op0, 0)) == XOR
2874 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2875 && ! side_effects_p (op1))
2876 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2878 /* Convert (A | B) & A to A. */
2879 if (GET_CODE (op0) == IOR
2880 && (rtx_equal_p (XEXP (op0, 0), op1)
2881 || rtx_equal_p (XEXP (op0, 1), op1))
2882 && ! side_effects_p (XEXP (op0, 0))
2883 && ! side_effects_p (XEXP (op0, 1)))
2884 return op1;
2886 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2887 ((A & N) + B) & M -> (A + B) & M
2888 Similarly if (N & M) == 0,
2889 ((A | N) + B) & M -> (A + B) & M
2890 and for - instead of + and/or ^ instead of |.
2891 Also, if (N & M) == 0, then
2892 (A +- N) & M -> A & M. */
2893 if (CONST_INT_P (trueop1)
2894 && HWI_COMPUTABLE_MODE_P (mode)
2895 && ~UINTVAL (trueop1)
2896 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2897 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2899 rtx pmop[2];
2900 int which;
2902 pmop[0] = XEXP (op0, 0);
2903 pmop[1] = XEXP (op0, 1);
2905 if (CONST_INT_P (pmop[1])
2906 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2907 return simplify_gen_binary (AND, mode, pmop[0], op1);
2909 for (which = 0; which < 2; which++)
2911 tem = pmop[which];
2912 switch (GET_CODE (tem))
2914 case AND:
2915 if (CONST_INT_P (XEXP (tem, 1))
2916 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2917 == UINTVAL (trueop1))
2918 pmop[which] = XEXP (tem, 0);
2919 break;
2920 case IOR:
2921 case XOR:
2922 if (CONST_INT_P (XEXP (tem, 1))
2923 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2924 pmop[which] = XEXP (tem, 0);
2925 break;
2926 default:
2927 break;
2931 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2933 tem = simplify_gen_binary (GET_CODE (op0), mode,
2934 pmop[0], pmop[1]);
2935 return simplify_gen_binary (code, mode, tem, op1);
2939 /* (and X (ior (not X) Y)) -> (and X Y) */
2940 if (GET_CODE (op1) == IOR
2941 && GET_CODE (XEXP (op1, 0)) == NOT
2942 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2943 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2945 /* (and (ior (not X) Y) X) -> (and X Y) */
2946 if (GET_CODE (op0) == IOR
2947 && GET_CODE (XEXP (op0, 0)) == NOT
2948 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
2949 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2951 /* (and X (ior Y (not X))) -> (and X Y) */
2952 if (GET_CODE (op1) == IOR
2953 && GET_CODE (XEXP (op1, 1)) == NOT
2954 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
2955 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
2957 /* (and (ior Y (not X)) X) -> (and X Y) */
2958 if (GET_CODE (op0) == IOR
2959 && GET_CODE (XEXP (op0, 1)) == NOT
2960 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
2961 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
2963 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2964 if (tem)
2965 return tem;
2967 tem = simplify_associative_operation (code, mode, op0, op1);
2968 if (tem)
2969 return tem;
2970 break;
2972 case UDIV:
2973 /* 0/x is 0 (or x&0 if x has side-effects). */
2974 if (trueop0 == CONST0_RTX (mode))
2976 if (side_effects_p (op1))
2977 return simplify_gen_binary (AND, mode, op1, trueop0);
2978 return trueop0;
2980 /* x/1 is x. */
2981 if (trueop1 == CONST1_RTX (mode))
2983 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2984 if (tem)
2985 return tem;
2987 /* Convert divide by power of two into shift. */
2988 if (CONST_INT_P (trueop1)
2989 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2990 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
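/* Illustrative aside (not part of the original source): unsigned division
   by 2**n is the same as a logical right shift by n:

     unsigned x = 1000u;
     assert (x / 8u == x >> 3);
*/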
2991 break;
2993 case DIV:
2994 /* Handle floating point and integers separately. */
2995 if (SCALAR_FLOAT_MODE_P (mode))
2997 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2998 safe for modes with NaNs, since 0.0 / 0.0 will then be
2999 NaN rather than 0.0. Nor is it safe for modes with signed
3000 zeros, since dividing 0 by a negative number gives -0.0. */
3001 if (trueop0 == CONST0_RTX (mode)
3002 && !HONOR_NANS (mode)
3003 && !HONOR_SIGNED_ZEROS (mode)
3004 && ! side_effects_p (op1))
3005 return op0;
3006 /* x/1.0 is x. */
3007 if (trueop1 == CONST1_RTX (mode)
3008 && !HONOR_SNANS (mode))
3009 return op0;
3011 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3012 && trueop1 != CONST0_RTX (mode))
3014 REAL_VALUE_TYPE d;
3015 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3017 /* x/-1.0 is -x. */
3018 if (REAL_VALUES_EQUAL (d, dconstm1)
3019 && !HONOR_SNANS (mode))
3020 return simplify_gen_unary (NEG, mode, op0, mode);
3022 /* Change FP division by a constant into multiplication.
3023 Only do this with -freciprocal-math. */
3024 if (flag_reciprocal_math
3025 && !REAL_VALUES_EQUAL (d, dconst0))
3027 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3028 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3029 return simplify_gen_binary (MULT, mode, op0, tem);
3033 else if (SCALAR_INT_MODE_P (mode))
3035 /* 0/x is 0 (or x&0 if x has side-effects). */
3036 if (trueop0 == CONST0_RTX (mode)
3037 && !cfun->can_throw_non_call_exceptions)
3039 if (side_effects_p (op1))
3040 return simplify_gen_binary (AND, mode, op1, trueop0);
3041 return trueop0;
3043 /* x/1 is x. */
3044 if (trueop1 == CONST1_RTX (mode))
3046 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3047 if (tem)
3048 return tem;
3050 /* x/-1 is -x. */
3051 if (trueop1 == constm1_rtx)
3053 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3054 if (x)
3055 return simplify_gen_unary (NEG, mode, x, mode);
3058 break;
3060 case UMOD:
3061 /* 0%x is 0 (or x&0 if x has side-effects). */
3062 if (trueop0 == CONST0_RTX (mode))
3064 if (side_effects_p (op1))
3065 return simplify_gen_binary (AND, mode, op1, trueop0);
3066 return trueop0;
3068 /* x%1 is 0 (or x&0 if x has side-effects). */
3069 if (trueop1 == CONST1_RTX (mode))
3071 if (side_effects_p (op0))
3072 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3073 return CONST0_RTX (mode);
3075 /* Implement modulus by power of two as AND. */
3076 if (CONST_INT_P (trueop1)
3077 && exact_log2 (UINTVAL (trueop1)) > 0)
3078 return simplify_gen_binary (AND, mode, op0,
3079 gen_int_mode (INTVAL (op1) - 1, mode));
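/* Illustrative aside (not part of the original source): the remainder of
   an unsigned division by 2**n is just the low n bits:

     unsigned x = 1000u;
     assert (x % 8u == (x & 7u));
*/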
3080 break;
3082 case MOD:
3083 /* 0%x is 0 (or x&0 if x has side-effects). */
3084 if (trueop0 == CONST0_RTX (mode))
3086 if (side_effects_p (op1))
3087 return simplify_gen_binary (AND, mode, op1, trueop0);
3088 return trueop0;
3090 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3091 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3093 if (side_effects_p (op0))
3094 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3095 return CONST0_RTX (mode);
3097 break;
3099 case ROTATERT:
3100 case ROTATE:
3101 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3102 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3103 bitsize - 1, use the other rotate direction with an amount of
3104 1 .. bitsize / 2 - 1 instead. */
3105 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3106 if (CONST_INT_P (trueop1)
3107 && IN_RANGE (INTVAL (trueop1),
3108 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3109 GET_MODE_PRECISION (mode) - 1))
3110 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3111 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3112 - INTVAL (trueop1)));
3113 #endif
3114 /* FALLTHRU */
3115 case ASHIFTRT:
3116 if (trueop1 == CONST0_RTX (mode))
3117 return op0;
3118 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3119 return op0;
3120 /* Rotating ~0 always results in ~0. */
3121 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3122 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3123 && ! side_effects_p (op1))
3124 return op0;
3125 /* Given:
3126 scalar modes M1, M2
3127 scalar constants c1, c2
3128 size (M2) > size (M1)
3129 c1 == size (M2) - size (M1)
3130 optimize:
3131 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3132 <low_part>)
3133 (const_int <c2>))
3134 to:
3135 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3136 <low_part>). */
3137 if (code == ASHIFTRT
3138 && !VECTOR_MODE_P (mode)
3139 && SUBREG_P (op0)
3140 && CONST_INT_P (op1)
3141 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3142 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3143 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3144 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3145 > GET_MODE_BITSIZE (mode))
3146 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3147 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3148 - GET_MODE_BITSIZE (mode)))
3149 && subreg_lowpart_p (op0))
3151 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3152 + INTVAL (op1));
3153 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3154 tmp = simplify_gen_binary (ASHIFTRT,
3155 GET_MODE (SUBREG_REG (op0)),
3156 XEXP (SUBREG_REG (op0), 0),
3157 tmp);
3158 return simplify_gen_subreg (mode, tmp, inner_mode,
3159 subreg_lowpart_offset (mode,
3160 inner_mode));
3162 canonicalize_shift:
3163 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3165 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3166 if (val != INTVAL (op1))
3167 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3169 break;
3171 case ASHIFT:
3172 case SS_ASHIFT:
3173 case US_ASHIFT:
3174 if (trueop1 == CONST0_RTX (mode))
3175 return op0;
3176 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3177 return op0;
3178 goto canonicalize_shift;
3180 case LSHIFTRT:
3181 if (trueop1 == CONST0_RTX (mode))
3182 return op0;
3183 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3184 return op0;
3185 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3186 if (GET_CODE (op0) == CLZ
3187 && CONST_INT_P (trueop1)
3188 && STORE_FLAG_VALUE == 1
3189 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3191 machine_mode imode = GET_MODE (XEXP (op0, 0));
3192 unsigned HOST_WIDE_INT zero_val = 0;
3194 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3195 && zero_val == GET_MODE_PRECISION (imode)
3196 && INTVAL (trueop1) == exact_log2 (zero_val))
3197 return simplify_gen_relational (EQ, mode, imode,
3198 XEXP (op0, 0), const0_rtx);
3200 goto canonicalize_shift;
3202 case SMIN:
3203 if (width <= HOST_BITS_PER_WIDE_INT
3204 && mode_signbit_p (mode, trueop1)
3205 && ! side_effects_p (op0))
3206 return op1;
3207 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3208 return op0;
3209 tem = simplify_associative_operation (code, mode, op0, op1);
3210 if (tem)
3211 return tem;
3212 break;
3214 case SMAX:
3215 if (width <= HOST_BITS_PER_WIDE_INT
3216 && CONST_INT_P (trueop1)
3217 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3218 && ! side_effects_p (op0))
3219 return op1;
3220 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3221 return op0;
3222 tem = simplify_associative_operation (code, mode, op0, op1);
3223 if (tem)
3224 return tem;
3225 break;
3227 case UMIN:
3228 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3229 return op1;
3230 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3231 return op0;
3232 tem = simplify_associative_operation (code, mode, op0, op1);
3233 if (tem)
3234 return tem;
3235 break;
3237 case UMAX:
3238 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3239 return op1;
3240 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3241 return op0;
3242 tem = simplify_associative_operation (code, mode, op0, op1);
3243 if (tem)
3244 return tem;
3245 break;
3247 case SS_PLUS:
3248 case US_PLUS:
3249 case SS_MINUS:
3250 case US_MINUS:
3251 case SS_MULT:
3252 case US_MULT:
3253 case SS_DIV:
3254 case US_DIV:
3255 /* ??? There are simplifications that can be done. */
3256 return 0;
3258 case VEC_SELECT:
3259 if (!VECTOR_MODE_P (mode))
3261 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3262 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3263 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3264 gcc_assert (XVECLEN (trueop1, 0) == 1);
3265 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3267 if (GET_CODE (trueop0) == CONST_VECTOR)
3268 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3269 (trueop1, 0, 0)));
3271 /* Extract a scalar element from a nested VEC_SELECT expression
3272 (with an optional nested VEC_CONCAT expression). Some targets
3273 (i386) extract a scalar element from a vector using a chain of
3274 nested VEC_SELECT expressions. When the input operand is a memory
3275 operand, this operation can be simplified to a simple scalar
3276 load from an offset memory address. */
3277 if (GET_CODE (trueop0) == VEC_SELECT)
3279 rtx op0 = XEXP (trueop0, 0);
3280 rtx op1 = XEXP (trueop0, 1);
3282 machine_mode opmode = GET_MODE (op0);
3283 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3284 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3286 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3287 int elem;
3289 rtvec vec;
3290 rtx tmp_op, tmp;
3292 gcc_assert (GET_CODE (op1) == PARALLEL);
3293 gcc_assert (i < n_elts);
3295 /* Select the element pointed to by the nested selector. */
3296 elem = INTVAL (XVECEXP (op1, 0, i));
3298 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3299 if (GET_CODE (op0) == VEC_CONCAT)
3301 rtx op00 = XEXP (op0, 0);
3302 rtx op01 = XEXP (op0, 1);
3304 machine_mode mode00, mode01;
3305 int n_elts00, n_elts01;
3307 mode00 = GET_MODE (op00);
3308 mode01 = GET_MODE (op01);
3310 /* Find out number of elements of each operand. */
3311 if (VECTOR_MODE_P (mode00))
3313 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3314 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3316 else
3317 n_elts00 = 1;
3319 if (VECTOR_MODE_P (mode01))
3321 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3322 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3324 else
3325 n_elts01 = 1;
3327 gcc_assert (n_elts == n_elts00 + n_elts01);
3329 /* Select correct operand of VEC_CONCAT
3330 and adjust selector. */
3331 if (elem < n_elts01)
3332 tmp_op = op00;
3333 else
3335 tmp_op = op01;
3336 elem -= n_elts00;
3339 else
3340 tmp_op = op0;
3342 vec = rtvec_alloc (1);
3343 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3345 tmp = gen_rtx_fmt_ee (code, mode,
3346 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3347 return tmp;
3349 if (GET_CODE (trueop0) == VEC_DUPLICATE
3350 && GET_MODE (XEXP (trueop0, 0)) == mode)
3351 return XEXP (trueop0, 0);
3353 else
3355 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3356 gcc_assert (GET_MODE_INNER (mode)
3357 == GET_MODE_INNER (GET_MODE (trueop0)));
3358 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3360 if (GET_CODE (trueop0) == CONST_VECTOR)
3362 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3363 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3364 rtvec v = rtvec_alloc (n_elts);
3365 unsigned int i;
3367 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3368 for (i = 0; i < n_elts; i++)
3370 rtx x = XVECEXP (trueop1, 0, i);
3372 gcc_assert (CONST_INT_P (x));
3373 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3374 INTVAL (x));
3377 return gen_rtx_CONST_VECTOR (mode, v);
3380 /* Recognize the identity. */
3381 if (GET_MODE (trueop0) == mode)
3383 bool maybe_ident = true;
3384 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3386 rtx j = XVECEXP (trueop1, 0, i);
3387 if (!CONST_INT_P (j) || INTVAL (j) != i)
3389 maybe_ident = false;
3390 break;
3393 if (maybe_ident)
3394 return trueop0;
3397 /* If we build {a,b} then permute it, build the result directly. */
3398 if (XVECLEN (trueop1, 0) == 2
3399 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3400 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3401 && GET_CODE (trueop0) == VEC_CONCAT
3402 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3403 && GET_MODE (XEXP (trueop0, 0)) == mode
3404 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3405 && GET_MODE (XEXP (trueop0, 1)) == mode)
3407 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3408 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3409 rtx subop0, subop1;
3411 gcc_assert (i0 < 4 && i1 < 4);
3412 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3413 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3415 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3418 if (XVECLEN (trueop1, 0) == 2
3419 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3420 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3421 && GET_CODE (trueop0) == VEC_CONCAT
3422 && GET_MODE (trueop0) == mode)
3424 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3425 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3426 rtx subop0, subop1;
3428 gcc_assert (i0 < 2 && i1 < 2);
3429 subop0 = XEXP (trueop0, i0);
3430 subop1 = XEXP (trueop0, i1);
3432 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3435 /* If we select one half of a vec_concat, return that. */
3436 if (GET_CODE (trueop0) == VEC_CONCAT
3437 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3439 rtx subop0 = XEXP (trueop0, 0);
3440 rtx subop1 = XEXP (trueop0, 1);
3441 machine_mode mode0 = GET_MODE (subop0);
3442 machine_mode mode1 = GET_MODE (subop1);
3443 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3444 int l0 = GET_MODE_SIZE (mode0) / li;
3445 int l1 = GET_MODE_SIZE (mode1) / li;
3446 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3447 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3449 bool success = true;
3450 for (int i = 1; i < l0; ++i)
3452 rtx j = XVECEXP (trueop1, 0, i);
3453 if (!CONST_INT_P (j) || INTVAL (j) != i)
3455 success = false;
3456 break;
3459 if (success)
3460 return subop0;
3462 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3464 bool success = true;
3465 for (int i = 1; i < l1; ++i)
3467 rtx j = XVECEXP (trueop1, 0, i);
3468 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3470 success = false;
3471 break;
3474 if (success)
3475 return subop1;
3480 if (XVECLEN (trueop1, 0) == 1
3481 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3482 && GET_CODE (trueop0) == VEC_CONCAT)
3484 rtx vec = trueop0;
3485 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3487 /* Try to find the element in the VEC_CONCAT. */
3488 while (GET_MODE (vec) != mode
3489 && GET_CODE (vec) == VEC_CONCAT)
3491 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3492 if (offset < vec_size)
3493 vec = XEXP (vec, 0);
3494 else
3496 offset -= vec_size;
3497 vec = XEXP (vec, 1);
3499 vec = avoid_constant_pool_reference (vec);
3502 if (GET_MODE (vec) == mode)
3503 return vec;
3506 /* If we select elements in a vec_merge that all come from the same
3507 operand, select from that operand directly. */
3508 if (GET_CODE (op0) == VEC_MERGE)
3510 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3511 if (CONST_INT_P (trueop02))
3513 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3514 bool all_operand0 = true;
3515 bool all_operand1 = true;
3516 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3518 rtx j = XVECEXP (trueop1, 0, i);
3519 if (sel & (1 << UINTVAL (j)))
3520 all_operand1 = false;
3521 else
3522 all_operand0 = false;
3524 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3525 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3526 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3527 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3531 /* If we have two nested selects that are inverses of each
3532 other, replace them with the source operand. */
3533 if (GET_CODE (trueop0) == VEC_SELECT
3534 && GET_MODE (XEXP (trueop0, 0)) == mode)
3536 rtx op0_subop1 = XEXP (trueop0, 1);
3537 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3538 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3540 /* Apply the outer ordering vector to the inner one. (The inner
3541 ordering vector is expressly permitted to be of a different
3542 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3543 then the two VEC_SELECTs cancel. */
3544 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3546 rtx x = XVECEXP (trueop1, 0, i);
3547 if (!CONST_INT_P (x))
3548 return 0;
3549 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3550 if (!CONST_INT_P (y) || i != INTVAL (y))
3551 return 0;
3553 return XEXP (trueop0, 0);
3556 return 0;
3557 case VEC_CONCAT:
3559 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3560 ? GET_MODE (trueop0)
3561 : GET_MODE_INNER (mode));
3562 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3563 ? GET_MODE (trueop1)
3564 : GET_MODE_INNER (mode));
3566 gcc_assert (VECTOR_MODE_P (mode));
3567 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3568 == GET_MODE_SIZE (mode));
3570 if (VECTOR_MODE_P (op0_mode))
3571 gcc_assert (GET_MODE_INNER (mode)
3572 == GET_MODE_INNER (op0_mode));
3573 else
3574 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3576 if (VECTOR_MODE_P (op1_mode))
3577 gcc_assert (GET_MODE_INNER (mode)
3578 == GET_MODE_INNER (op1_mode));
3579 else
3580 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3582 if ((GET_CODE (trueop0) == CONST_VECTOR
3583 || CONST_SCALAR_INT_P (trueop0)
3584 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3585 && (GET_CODE (trueop1) == CONST_VECTOR
3586 || CONST_SCALAR_INT_P (trueop1)
3587 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3589 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3590 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3591 rtvec v = rtvec_alloc (n_elts);
3592 unsigned int i;
3593 unsigned in_n_elts = 1;
3595 if (VECTOR_MODE_P (op0_mode))
3596 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3597 for (i = 0; i < n_elts; i++)
3599 if (i < in_n_elts)
3601 if (!VECTOR_MODE_P (op0_mode))
3602 RTVEC_ELT (v, i) = trueop0;
3603 else
3604 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3606 else
3608 if (!VECTOR_MODE_P (op1_mode))
3609 RTVEC_ELT (v, i) = trueop1;
3610 else
3611 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3612 i - in_n_elts);
3616 return gen_rtx_CONST_VECTOR (mode, v);
3619 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3620 Restrict the transformation to avoid generating a VEC_SELECT with a
3621 mode unrelated to its operand. */
3622 if (GET_CODE (trueop0) == VEC_SELECT
3623 && GET_CODE (trueop1) == VEC_SELECT
3624 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3625 && GET_MODE (XEXP (trueop0, 0)) == mode)
3627 rtx par0 = XEXP (trueop0, 1);
3628 rtx par1 = XEXP (trueop1, 1);
3629 int len0 = XVECLEN (par0, 0);
3630 int len1 = XVECLEN (par1, 0);
3631 rtvec vec = rtvec_alloc (len0 + len1);
3632 for (int i = 0; i < len0; i++)
3633 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3634 for (int i = 0; i < len1; i++)
3635 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3636 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3637 gen_rtx_PARALLEL (VOIDmode, vec));
3640 return 0;
3642 default:
3643 gcc_unreachable ();
3646 return 0;
3649 rtx
3650 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3651 rtx op0, rtx op1)
3653 unsigned int width = GET_MODE_PRECISION (mode);
3655 if (VECTOR_MODE_P (mode)
3656 && code != VEC_CONCAT
3657 && GET_CODE (op0) == CONST_VECTOR
3658 && GET_CODE (op1) == CONST_VECTOR)
3660 unsigned n_elts = GET_MODE_NUNITS (mode);
3661 machine_mode op0mode = GET_MODE (op0);
3662 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3663 machine_mode op1mode = GET_MODE (op1);
3664 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3665 rtvec v = rtvec_alloc (n_elts);
3666 unsigned int i;
3668 gcc_assert (op0_n_elts == n_elts);
3669 gcc_assert (op1_n_elts == n_elts);
3670 for (i = 0; i < n_elts; i++)
3672 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3673 CONST_VECTOR_ELT (op0, i),
3674 CONST_VECTOR_ELT (op1, i));
3675 if (!x)
3676 return 0;
3677 RTVEC_ELT (v, i) = x;
3680 return gen_rtx_CONST_VECTOR (mode, v);
3683 if (VECTOR_MODE_P (mode)
3684 && code == VEC_CONCAT
3685 && (CONST_SCALAR_INT_P (op0)
3686 || GET_CODE (op0) == CONST_FIXED
3687 || CONST_DOUBLE_AS_FLOAT_P (op0))
3688 && (CONST_SCALAR_INT_P (op1)
3689 || CONST_DOUBLE_AS_FLOAT_P (op1)
3690 || GET_CODE (op1) == CONST_FIXED))
3692 unsigned n_elts = GET_MODE_NUNITS (mode);
3693 rtvec v = rtvec_alloc (n_elts);
3695 gcc_assert (n_elts >= 2);
3696 if (n_elts == 2)
3698 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3699 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3701 RTVEC_ELT (v, 0) = op0;
3702 RTVEC_ELT (v, 1) = op1;
3704 else
3706 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3707 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3708 unsigned i;
3710 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3711 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3712 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3714 for (i = 0; i < op0_n_elts; ++i)
3715 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3716 for (i = 0; i < op1_n_elts; ++i)
3717 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3720 return gen_rtx_CONST_VECTOR (mode, v);
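/* Editor's note (illustrative, not in the original source): the floating-point
   branch below also folds IEEE sign-bit manipulations expressed with the
   integer bitwise codes, e.g. in SFmode (xor (const_double 1.0)
   (const_double -0.0)) yields (const_double -1.0), since the XOR flips only
   the sign bit of the representation.  */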
3723 if (SCALAR_FLOAT_MODE_P (mode)
3724 && CONST_DOUBLE_AS_FLOAT_P (op0)
3725 && CONST_DOUBLE_AS_FLOAT_P (op1)
3726 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3728 if (code == AND
3729 || code == IOR
3730 || code == XOR)
3732 long tmp0[4];
3733 long tmp1[4];
3734 REAL_VALUE_TYPE r;
3735 int i;
3737 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3738 GET_MODE (op0));
3739 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3740 GET_MODE (op1));
3741 for (i = 0; i < 4; i++)
3743 switch (code)
3745 case AND:
3746 tmp0[i] &= tmp1[i];
3747 break;
3748 case IOR:
3749 tmp0[i] |= tmp1[i];
3750 break;
3751 case XOR:
3752 tmp0[i] ^= tmp1[i];
3753 break;
3754 default:
3755 gcc_unreachable ();
3758 real_from_target (&r, tmp0, mode);
3759 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3761 else
3763 REAL_VALUE_TYPE f0, f1, value, result;
3764 bool inexact;
3766 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3767 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3768 real_convert (&f0, mode, &f0);
3769 real_convert (&f1, mode, &f1);
3771 if (HONOR_SNANS (mode)
3772 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3773 return 0;
3775 if (code == DIV
3776 && REAL_VALUES_EQUAL (f1, dconst0)
3777 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3778 return 0;
3780 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3781 && flag_trapping_math
3782 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3784 int s0 = REAL_VALUE_NEGATIVE (f0);
3785 int s1 = REAL_VALUE_NEGATIVE (f1);
3787 switch (code)
3789 case PLUS:
3790 /* Inf + -Inf = NaN plus exception. */
3791 if (s0 != s1)
3792 return 0;
3793 break;
3794 case MINUS:
3795 /* Inf - Inf = NaN plus exception. */
3796 if (s0 == s1)
3797 return 0;
3798 break;
3799 case DIV:
3800 /* Inf / Inf = NaN plus exception. */
3801 return 0;
3802 default:
3803 break;
3807 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3808 && flag_trapping_math
3809 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3810 || (REAL_VALUE_ISINF (f1)
3811 && REAL_VALUES_EQUAL (f0, dconst0))))
3812 /* Inf * 0 = NaN plus exception. */
3813 return 0;
3815 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3816 &f0, &f1);
3817 real_convert (&result, mode, &value);
3819 /* Don't constant fold this floating point operation if
3820 the result has overflowed and flag_trapping_math is set. */
3822 if (flag_trapping_math
3823 && MODE_HAS_INFINITIES (mode)
3824 && REAL_VALUE_ISINF (result)
3825 && !REAL_VALUE_ISINF (f0)
3826 && !REAL_VALUE_ISINF (f1))
3827 /* Overflow plus exception. */
3828 return 0;
3830 /* Don't constant fold this floating point operation if the
3831 result may depend upon the run-time rounding mode and
3832 flag_rounding_math is set, or if GCC's software emulation
3833 is unable to accurately represent the result. */
3835 if ((flag_rounding_math
3836 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3837 && (inexact || !real_identical (&result, &value)))
3838 return NULL_RTX;
3840 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3844 /* We can fold some multi-word operations. */
3845 if ((GET_MODE_CLASS (mode) == MODE_INT
3846 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3847 && CONST_SCALAR_INT_P (op0)
3848 && CONST_SCALAR_INT_P (op1))
3850 wide_int result;
3851 bool overflow;
3852 rtx_mode_t pop0 = std::make_pair (op0, mode);
3853 rtx_mode_t pop1 = std::make_pair (op1, mode);
3855 #if TARGET_SUPPORTS_WIDE_INT == 0
3856 /* This assert keeps the simplification from producing a result
3857 that cannot be represented in a CONST_DOUBLE but a lot of
3858 upstream callers expect that this function never fails to
3859 simplify something, so if you added this to the test
3860 above, the code would die later anyway. If this assert
3861 happens, you just need to make the port support wide int. */
3862 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3863 #endif
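/* Illustrative example (editor's note, not in the original source): in SImode,
   (plus (const_int 7) (const_int -3)) folds to (const_int 4) via wi::add
   below, whereas (ashift (const_int 1) (const_int 40)) is folded only when
   SHIFT_COUNT_TRUNCATED reduces the out-of-range shift count modulo the
   precision; otherwise NULL_RTX is returned.  */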
3864 switch (code)
3866 case MINUS:
3867 result = wi::sub (pop0, pop1);
3868 break;
3870 case PLUS:
3871 result = wi::add (pop0, pop1);
3872 break;
3874 case MULT:
3875 result = wi::mul (pop0, pop1);
3876 break;
3878 case DIV:
3879 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3880 if (overflow)
3881 return NULL_RTX;
3882 break;
3884 case MOD:
3885 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3886 if (overflow)
3887 return NULL_RTX;
3888 break;
3890 case UDIV:
3891 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3892 if (overflow)
3893 return NULL_RTX;
3894 break;
3896 case UMOD:
3897 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3898 if (overflow)
3899 return NULL_RTX;
3900 break;
3902 case AND:
3903 result = wi::bit_and (pop0, pop1);
3904 break;
3906 case IOR:
3907 result = wi::bit_or (pop0, pop1);
3908 break;
3910 case XOR:
3911 result = wi::bit_xor (pop0, pop1);
3912 break;
3914 case SMIN:
3915 result = wi::smin (pop0, pop1);
3916 break;
3918 case SMAX:
3919 result = wi::smax (pop0, pop1);
3920 break;
3922 case UMIN:
3923 result = wi::umin (pop0, pop1);
3924 break;
3926 case UMAX:
3927 result = wi::umax (pop0, pop1);
3928 break;
3930 case LSHIFTRT:
3931 case ASHIFTRT:
3932 case ASHIFT:
3934 wide_int wop1 = pop1;
3935 if (SHIFT_COUNT_TRUNCATED)
3936 wop1 = wi::umod_trunc (wop1, width);
3937 else if (wi::geu_p (wop1, width))
3938 return NULL_RTX;
3940 switch (code)
3942 case LSHIFTRT:
3943 result = wi::lrshift (pop0, wop1);
3944 break;
3946 case ASHIFTRT:
3947 result = wi::arshift (pop0, wop1);
3948 break;
3950 case ASHIFT:
3951 result = wi::lshift (pop0, wop1);
3952 break;
3954 default:
3955 gcc_unreachable ();
3957 break;
3959 case ROTATE:
3960 case ROTATERT:
3962 if (wi::neg_p (pop1))
3963 return NULL_RTX;
3965 switch (code)
3967 case ROTATE:
3968 result = wi::lrotate (pop0, pop1);
3969 break;
3971 case ROTATERT:
3972 result = wi::rrotate (pop0, pop1);
3973 break;
3975 default:
3976 gcc_unreachable ();
3978 break;
3980 default:
3981 return NULL_RTX;
3983 return immed_wide_int_const (result, mode);
3986 return NULL_RTX;
3991 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3992 PLUS or MINUS.
3994 Rather than testing for specific cases, we do this by a brute-force method
3995 and do all possible simplifications until no more changes occur. Then
3996 we rebuild the operation. */
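/* Illustrative example (editor's note, not in the original source):
   (minus (plus A B) (plus A C)) is flattened into the operand list
   { A, B, -A, -C }; the pairwise loop cancels A against -A, and the
   surviving operands are rebuilt as (minus B C).  */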
3998 struct simplify_plus_minus_op_data
4000 rtx op;
4001 short neg;
4004 static bool
4005 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4007 int result;
4009 result = (commutative_operand_precedence (y)
4010 - commutative_operand_precedence (x));
4011 if (result)
4012 return result > 0;
4014 /* Group together equal REGs to do more simplification. */
4015 if (REG_P (x) && REG_P (y))
4016 return REGNO (x) > REGNO (y);
4017 else
4018 return false;
4021 static rtx
4022 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4023 rtx op1)
4025 struct simplify_plus_minus_op_data ops[16];
4026 rtx result, tem;
4027 int n_ops = 2;
4028 int changed, n_constants, canonicalized = 0;
4029 int i, j;
4031 memset (ops, 0, sizeof ops);
4033 /* Set up the two operands and then expand them until nothing has been
4034 changed. If we run out of room in our array, give up; this should
4035 almost never happen. */
4037 ops[0].op = op0;
4038 ops[0].neg = 0;
4039 ops[1].op = op1;
4040 ops[1].neg = (code == MINUS);
4044 changed = 0;
4045 n_constants = 0;
4047 for (i = 0; i < n_ops; i++)
4049 rtx this_op = ops[i].op;
4050 int this_neg = ops[i].neg;
4051 enum rtx_code this_code = GET_CODE (this_op);
4053 switch (this_code)
4055 case PLUS:
4056 case MINUS:
4057 if (n_ops == ARRAY_SIZE (ops))
4058 return NULL_RTX;
4060 ops[n_ops].op = XEXP (this_op, 1);
4061 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4062 n_ops++;
4064 ops[i].op = XEXP (this_op, 0);
4065 changed = 1;
4066 canonicalized |= this_neg || i != n_ops - 2;
4067 break;
4069 case NEG:
4070 ops[i].op = XEXP (this_op, 0);
4071 ops[i].neg = ! this_neg;
4072 changed = 1;
4073 canonicalized = 1;
4074 break;
4076 case CONST:
4077 if (n_ops != ARRAY_SIZE (ops)
4078 && GET_CODE (XEXP (this_op, 0)) == PLUS
4079 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4080 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4082 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4083 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4084 ops[n_ops].neg = this_neg;
4085 n_ops++;
4086 changed = 1;
4087 canonicalized = 1;
4089 break;
4091 case NOT:
4092 /* ~a -> (-a - 1) */
4093 if (n_ops != ARRAY_SIZE (ops))
4095 ops[n_ops].op = CONSTM1_RTX (mode);
4096 ops[n_ops++].neg = this_neg;
4097 ops[i].op = XEXP (this_op, 0);
4098 ops[i].neg = !this_neg;
4099 changed = 1;
4100 canonicalized = 1;
4102 break;
4104 case CONST_INT:
4105 n_constants++;
4106 if (this_neg)
4108 ops[i].op = neg_const_int (mode, this_op);
4109 ops[i].neg = 0;
4110 changed = 1;
4111 canonicalized = 1;
4113 break;
4115 default:
4116 break;
4120 while (changed);
4122 if (n_constants > 1)
4123 canonicalized = 1;
4125 gcc_assert (n_ops >= 2);
4127 /* If we only have two operands, we can avoid the loops. */
4128 if (n_ops == 2)
4130 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4131 rtx lhs, rhs;
4133 /* Get the two operands. Be careful with the order, especially for
4134 the cases where code == MINUS. */
4135 if (ops[0].neg && ops[1].neg)
4137 lhs = gen_rtx_NEG (mode, ops[0].op);
4138 rhs = ops[1].op;
4140 else if (ops[0].neg)
4142 lhs = ops[1].op;
4143 rhs = ops[0].op;
4145 else
4147 lhs = ops[0].op;
4148 rhs = ops[1].op;
4151 return simplify_const_binary_operation (code, mode, lhs, rhs);
4154 /* Now simplify each pair of operands until nothing changes. */
4157 /* Insertion sort is good enough for a small array. */
4158 for (i = 1; i < n_ops; i++)
4160 struct simplify_plus_minus_op_data save;
4161 j = i - 1;
4162 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4163 continue;
4165 canonicalized = 1;
4166 save = ops[i];
4168 ops[j + 1] = ops[j];
4169 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4170 ops[j + 1] = save;
4173 changed = 0;
4174 for (i = n_ops - 1; i > 0; i--)
4175 for (j = i - 1; j >= 0; j--)
4177 rtx lhs = ops[j].op, rhs = ops[i].op;
4178 int lneg = ops[j].neg, rneg = ops[i].neg;
4180 if (lhs != 0 && rhs != 0)
4182 enum rtx_code ncode = PLUS;
4184 if (lneg != rneg)
4186 ncode = MINUS;
4187 if (lneg)
4188 tem = lhs, lhs = rhs, rhs = tem;
4190 else if (swap_commutative_operands_p (lhs, rhs))
4191 tem = lhs, lhs = rhs, rhs = tem;
4193 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4194 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4196 rtx tem_lhs, tem_rhs;
4198 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4199 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4200 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4202 if (tem && !CONSTANT_P (tem))
4203 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4205 else
4206 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4208 if (tem)
4210 /* Reject "simplifications" that just wrap the two
4211 arguments in a CONST. Failure to do so can result
4212 in infinite recursion with simplify_binary_operation
4213 when it calls us to simplify CONST operations.
4214 Also, if we find such a simplification, don't try
4215 any more combinations with this rhs: we must have
4216 something like symbol+offset, i.e. one of the
4217 trivial CONST expressions we handle later. */
4218 if (GET_CODE (tem) == CONST
4219 && GET_CODE (XEXP (tem, 0)) == ncode
4220 && XEXP (XEXP (tem, 0), 0) == lhs
4221 && XEXP (XEXP (tem, 0), 1) == rhs)
4222 break;
4223 lneg &= rneg;
4224 if (GET_CODE (tem) == NEG)
4225 tem = XEXP (tem, 0), lneg = !lneg;
4226 if (CONST_INT_P (tem) && lneg)
4227 tem = neg_const_int (mode, tem), lneg = 0;
4229 ops[i].op = tem;
4230 ops[i].neg = lneg;
4231 ops[j].op = NULL_RTX;
4232 changed = 1;
4233 canonicalized = 1;
4238 /* If nothing changed, fail. */
4239 if (!canonicalized)
4240 return NULL_RTX;
4242 /* Pack all the operands to the lower-numbered entries. */
4243 for (i = 0, j = 0; j < n_ops; j++)
4244 if (ops[j].op)
4246 ops[i] = ops[j];
4247 i++;
4249 n_ops = i;
4251 while (changed);
4253 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4254 if (n_ops == 2
4255 && CONST_INT_P (ops[1].op)
4256 && CONSTANT_P (ops[0].op)
4257 && ops[0].neg)
4258 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4260 /* We suppressed creation of trivial CONST expressions in the
4261 combination loop to avoid recursion. Create one manually now.
4262 The combination loop should have ensured that there is exactly
4263 one CONST_INT, and the sort will have ensured that it is last
4264 in the array and that any other constant will be next-to-last. */
4266 if (n_ops > 1
4267 && CONST_INT_P (ops[n_ops - 1].op)
4268 && CONSTANT_P (ops[n_ops - 2].op))
4270 rtx value = ops[n_ops - 1].op;
4271 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4272 value = neg_const_int (mode, value);
4273 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4274 INTVAL (value));
4275 n_ops--;
4278 /* Put a non-negated operand first, if possible. */
4280 for (i = 0; i < n_ops && ops[i].neg; i++)
4281 continue;
4282 if (i == n_ops)
4283 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4284 else if (i != 0)
4286 tem = ops[0].op;
4287 ops[0] = ops[i];
4288 ops[i].op = tem;
4289 ops[i].neg = 1;
4292 /* Now make the result by performing the requested operations. */
4293 result = ops[0].op;
4294 for (i = 1; i < n_ops; i++)
4295 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4296 mode, result, ops[i].op);
4298 return result;
4301 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4302 static bool
4303 plus_minus_operand_p (const_rtx x)
4305 return GET_CODE (x) == PLUS
4306 || GET_CODE (x) == MINUS
4307 || (GET_CODE (x) == CONST
4308 && GET_CODE (XEXP (x, 0)) == PLUS
4309 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4310 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4313 /* Like simplify_binary_operation except used for relational operators.
4314 MODE is the mode of the result. If MODE is VOIDmode, the operands
4315 must not both be VOIDmode as well.
4317 CMP_MODE specifies the mode in which the comparison is done, so it is
4318 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4319 the operands or, if both are VOIDmode, the operands are compared in
4320 "infinite precision". */
4322 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4323 machine_mode cmp_mode, rtx op0, rtx op1)
4325 rtx tem, trueop0, trueop1;
4327 if (cmp_mode == VOIDmode)
4328 cmp_mode = GET_MODE (op0);
4329 if (cmp_mode == VOIDmode)
4330 cmp_mode = GET_MODE (op1);
4332 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4333 if (tem)
4335 if (SCALAR_FLOAT_MODE_P (mode))
4337 if (tem == const0_rtx)
4338 return CONST0_RTX (mode);
4339 #ifdef FLOAT_STORE_FLAG_VALUE
4341 REAL_VALUE_TYPE val;
4342 val = FLOAT_STORE_FLAG_VALUE (mode);
4343 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4345 #else
4346 return NULL_RTX;
4347 #endif
4349 if (VECTOR_MODE_P (mode))
4351 if (tem == const0_rtx)
4352 return CONST0_RTX (mode);
4353 #ifdef VECTOR_STORE_FLAG_VALUE
4355 int i, units;
4356 rtvec v;
4358 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4359 if (val == NULL_RTX)
4360 return NULL_RTX;
4361 if (val == const1_rtx)
4362 return CONST1_RTX (mode);
4364 units = GET_MODE_NUNITS (mode);
4365 v = rtvec_alloc (units);
4366 for (i = 0; i < units; i++)
4367 RTVEC_ELT (v, i) = val;
4368 return gen_rtx_raw_CONST_VECTOR (mode, v);
4370 #else
4371 return NULL_RTX;
4372 #endif
4375 return tem;
4378 /* For the following tests, ensure const0_rtx is op1. */
4379 if (swap_commutative_operands_p (op0, op1)
4380 || (op0 == const0_rtx && op1 != const0_rtx))
4381 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4383 /* If op0 is a compare, extract the comparison arguments from it. */
4384 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4385 return simplify_gen_relational (code, mode, VOIDmode,
4386 XEXP (op0, 0), XEXP (op0, 1));
4388 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4389 || CC0_P (op0))
4390 return NULL_RTX;
4392 trueop0 = avoid_constant_pool_reference (op0);
4393 trueop1 = avoid_constant_pool_reference (op1);
4394 return simplify_relational_operation_1 (code, mode, cmp_mode,
4395 trueop0, trueop1);
4398 /* This part of simplify_relational_operation is only used when CMP_MODE
4399 is not in class MODE_CC (i.e. it is a real comparison).
4401 MODE is the mode of the result, while CMP_MODE specifies the mode
4402 in which the comparison is done, so it is the mode of the operands. */
4404 static rtx
4405 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4406 machine_mode cmp_mode, rtx op0, rtx op1)
4408 enum rtx_code op0code = GET_CODE (op0);
4410 if (op1 == const0_rtx && COMPARISON_P (op0))
4412 /* If op0 is a comparison, extract the comparison arguments
4413 from it. */
4414 if (code == NE)
4416 if (GET_MODE (op0) == mode)
4417 return simplify_rtx (op0);
4418 else
4419 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4420 XEXP (op0, 0), XEXP (op0, 1));
4422 else if (code == EQ)
4424 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4425 if (new_code != UNKNOWN)
4426 return simplify_gen_relational (new_code, mode, VOIDmode,
4427 XEXP (op0, 0), XEXP (op0, 1));
4431 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4432 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
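/* Illustrative example (editor's note, not in the original source): in SImode,
   (ltu (plus a (const_int 4)) (const_int 4)) -- an overflow test for a + 4 --
   becomes (geu a (const_int -4)), i.e. a >= 0xfffffffc unsigned.  */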
4433 if ((code == LTU || code == GEU)
4434 && GET_CODE (op0) == PLUS
4435 && CONST_INT_P (XEXP (op0, 1))
4436 && (rtx_equal_p (op1, XEXP (op0, 0))
4437 || rtx_equal_p (op1, XEXP (op0, 1)))
4438 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4439 && XEXP (op0, 1) != const0_rtx)
4441 rtx new_cmp
4442 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4443 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4444 cmp_mode, XEXP (op0, 0), new_cmp);
4447 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4448 if ((code == LTU || code == GEU)
4449 && GET_CODE (op0) == PLUS
4450 && rtx_equal_p (op1, XEXP (op0, 1))
4451 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4452 && !rtx_equal_p (op1, XEXP (op0, 0)))
4453 return simplify_gen_relational (code, mode, cmp_mode, op0,
4454 copy_rtx (XEXP (op0, 0)));
4456 if (op1 == const0_rtx)
4458 /* Canonicalize (GTU x 0) as (NE x 0). */
4459 if (code == GTU)
4460 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4461 /* Canonicalize (LEU x 0) as (EQ x 0). */
4462 if (code == LEU)
4463 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4465 else if (op1 == const1_rtx)
4467 switch (code)
4469 case GE:
4470 /* Canonicalize (GE x 1) as (GT x 0). */
4471 return simplify_gen_relational (GT, mode, cmp_mode,
4472 op0, const0_rtx);
4473 case GEU:
4474 /* Canonicalize (GEU x 1) as (NE x 0). */
4475 return simplify_gen_relational (NE, mode, cmp_mode,
4476 op0, const0_rtx);
4477 case LT:
4478 /* Canonicalize (LT x 1) as (LE x 0). */
4479 return simplify_gen_relational (LE, mode, cmp_mode,
4480 op0, const0_rtx);
4481 case LTU:
4482 /* Canonicalize (LTU x 1) as (EQ x 0). */
4483 return simplify_gen_relational (EQ, mode, cmp_mode,
4484 op0, const0_rtx);
4485 default:
4486 break;
4489 else if (op1 == constm1_rtx)
4491 /* Canonicalize (LE x -1) as (LT x 0). */
4492 if (code == LE)
4493 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4494 /* Canonicalize (GT x -1) as (GE x 0). */
4495 if (code == GT)
4496 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4499 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
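/* Illustrative example (editor's note, not in the original source):
   (eq (plus x (const_int 3)) (const_int 10)) becomes (eq x (const_int 7)),
   the constant being computed by the simplify_gen_binary call below.  */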
4500 if ((code == EQ || code == NE)
4501 && (op0code == PLUS || op0code == MINUS)
4502 && CONSTANT_P (op1)
4503 && CONSTANT_P (XEXP (op0, 1))
4504 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4506 rtx x = XEXP (op0, 0);
4507 rtx c = XEXP (op0, 1);
4508 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4509 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4511 /* Detect an infinitely recursive condition, where we oscillate at this
4512 simplification case between:
4513 A + B == C <---> C - B == A,
4514 where A, B, and C are all constant but non-simplifiable expressions,
4515 usually SYMBOL_REFs. */
4516 if (GET_CODE (tem) == invcode
4517 && CONSTANT_P (x)
4518 && rtx_equal_p (c, XEXP (tem, 1)))
4519 return NULL_RTX;
4521 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4524 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4525 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4526 if (code == NE
4527 && op1 == const0_rtx
4528 && GET_MODE_CLASS (mode) == MODE_INT
4529 && cmp_mode != VOIDmode
4530 /* ??? Work-around BImode bugs in the ia64 backend. */
4531 && mode != BImode
4532 && cmp_mode != BImode
4533 && nonzero_bits (op0, cmp_mode) == 1
4534 && STORE_FLAG_VALUE == 1)
4535 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4536 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4537 : lowpart_subreg (mode, op0, cmp_mode);
4539 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4540 if ((code == EQ || code == NE)
4541 && op1 == const0_rtx
4542 && op0code == XOR)
4543 return simplify_gen_relational (code, mode, cmp_mode,
4544 XEXP (op0, 0), XEXP (op0, 1));
4546 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4547 if ((code == EQ || code == NE)
4548 && op0code == XOR
4549 && rtx_equal_p (XEXP (op0, 0), op1)
4550 && !side_effects_p (XEXP (op0, 0)))
4551 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4552 CONST0_RTX (mode));
4554 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4555 if ((code == EQ || code == NE)
4556 && op0code == XOR
4557 && rtx_equal_p (XEXP (op0, 1), op1)
4558 && !side_effects_p (XEXP (op0, 1)))
4559 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4560 CONST0_RTX (mode));
4562 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4563 if ((code == EQ || code == NE)
4564 && op0code == XOR
4565 && CONST_SCALAR_INT_P (op1)
4566 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4567 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4568 simplify_gen_binary (XOR, cmp_mode,
4569 XEXP (op0, 1), op1));
4571 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4572 can be implemented with a BICS instruction on some targets, or
4573 constant-folded if y is a constant. */
4574 if ((code == EQ || code == NE)
4575 && op0code == AND
4576 && rtx_equal_p (XEXP (op0, 0), op1)
4577 && !side_effects_p (op1))
4579 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4580 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4582 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4583 CONST0_RTX (cmp_mode));
4586 /* Likewise for (eq/ne (and x y) y). */
4587 if ((code == EQ || code == NE)
4588 && op0code == AND
4589 && rtx_equal_p (XEXP (op0, 1), op1)
4590 && !side_effects_p (op1))
4592 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4593 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4595 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4596 CONST0_RTX (cmp_mode));
4599 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4600 if ((code == EQ || code == NE)
4601 && GET_CODE (op0) == BSWAP
4602 && CONST_SCALAR_INT_P (op1))
4603 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4604 simplify_gen_unary (BSWAP, cmp_mode,
4605 op1, cmp_mode));
4607 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4608 if ((code == EQ || code == NE)
4609 && GET_CODE (op0) == BSWAP
4610 && GET_CODE (op1) == BSWAP)
4611 return simplify_gen_relational (code, mode, cmp_mode,
4612 XEXP (op0, 0), XEXP (op1, 0));
4614 if (op0code == POPCOUNT && op1 == const0_rtx)
4615 switch (code)
4617 case EQ:
4618 case LE:
4619 case LEU:
4620 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4621 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4622 XEXP (op0, 0), const0_rtx);
4624 case NE:
4625 case GT:
4626 case GTU:
4627 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4628 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4629 XEXP (op0, 0), const0_rtx);
4631 default:
4632 break;
4635 return NULL_RTX;
4638 enum
4640 CMP_EQ = 1,
4641 CMP_LT = 2,
4642 CMP_GT = 4,
4643 CMP_LTU = 8,
4644 CMP_GTU = 16
4648 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4649 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4650 For KNOWN_RESULT to make sense, it should be either CMP_EQ, or the
4651 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4652 For floating-point comparisons, assume that the operands were ordered. */
4654 static rtx
4655 comparison_result (enum rtx_code code, int known_results)
4657 switch (code)
4659 case EQ:
4660 case UNEQ:
4661 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4662 case NE:
4663 case LTGT:
4664 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4666 case LT:
4667 case UNLT:
4668 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4669 case GE:
4670 case UNGE:
4671 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4673 case GT:
4674 case UNGT:
4675 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4676 case LE:
4677 case UNLE:
4678 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4680 case LTU:
4681 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4682 case GEU:
4683 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4685 case GTU:
4686 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4687 case LEU:
4688 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4690 case ORDERED:
4691 return const_true_rtx;
4692 case UNORDERED:
4693 return const0_rtx;
4694 default:
4695 gcc_unreachable ();
4699 /* Check if the given comparison (done in the given MODE) is actually
4700 a tautology or a contradiction. If the mode is VOIDmode, the
4701 comparison is done in "infinite precision". If no simplification
4702 is possible, this function returns zero. Otherwise, it returns
4703 either const_true_rtx or const0_rtx. */
4706 simplify_const_relational_operation (enum rtx_code code,
4707 machine_mode mode,
4708 rtx op0, rtx op1)
4710 rtx tem;
4711 rtx trueop0;
4712 rtx trueop1;
4714 gcc_assert (mode != VOIDmode
4715 || (GET_MODE (op0) == VOIDmode
4716 && GET_MODE (op1) == VOIDmode));
4718 /* If op0 is a compare, extract the comparison arguments from it. */
4719 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4721 op1 = XEXP (op0, 1);
4722 op0 = XEXP (op0, 0);
4724 if (GET_MODE (op0) != VOIDmode)
4725 mode = GET_MODE (op0);
4726 else if (GET_MODE (op1) != VOIDmode)
4727 mode = GET_MODE (op1);
4728 else
4729 return 0;
4732 /* We can't simplify MODE_CC values since we don't know what the
4733 actual comparison is. */
4734 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4735 return 0;
4737 /* Make sure the constant is second. */
4738 if (swap_commutative_operands_p (op0, op1))
4740 tem = op0, op0 = op1, op1 = tem;
4741 code = swap_condition (code);
4744 trueop0 = avoid_constant_pool_reference (op0);
4745 trueop1 = avoid_constant_pool_reference (op1);
4747 /* For integer comparisons of A and B maybe we can simplify A - B and can
4748 then simplify a comparison of that with zero. If A and B are both either
4749 a register or a CONST_INT, this can't help; testing for these cases will
4750 prevent infinite recursion here and speed things up.
4752 We can only do this for EQ and NE comparisons, as otherwise we may
4753 lose or introduce overflow, which we cannot disregard as undefined,
4754 since we do not know the signedness of the operation on either the
4755 left or the right-hand side of the comparison. */
4757 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4758 && (code == EQ || code == NE)
4759 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4760 && (REG_P (op1) || CONST_INT_P (trueop1)))
4761 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4762 /* We cannot do this if tem is a nonzero address. */
4763 && ! nonzero_address_p (tem))
4764 return simplify_const_relational_operation (signed_condition (code),
4765 mode, tem, const0_rtx);
4767 if (! HONOR_NANS (mode) && code == ORDERED)
4768 return const_true_rtx;
4770 if (! HONOR_NANS (mode) && code == UNORDERED)
4771 return const0_rtx;
4773 /* For modes without NaNs, if the two operands are equal, we know the
4774 result except if they have side-effects. Even with NaNs we know
4775 the result of unordered comparisons and, if signaling NaNs are
4776 irrelevant, also the result of LT/GT/LTGT. */
4777 if ((! HONOR_NANS (trueop0)
4778 || code == UNEQ || code == UNLE || code == UNGE
4779 || ((code == LT || code == GT || code == LTGT)
4780 && ! HONOR_SNANS (trueop0)))
4781 && rtx_equal_p (trueop0, trueop1)
4782 && ! side_effects_p (trueop0))
4783 return comparison_result (code, CMP_EQ);
4785 /* If the operands are floating-point constants, see if we can fold
4786 the result. */
4787 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4788 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4789 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4791 REAL_VALUE_TYPE d0, d1;
4793 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4794 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4796 /* Comparisons are unordered iff at least one of the values is NaN. */
4797 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4798 switch (code)
4800 case UNEQ:
4801 case UNLT:
4802 case UNGT:
4803 case UNLE:
4804 case UNGE:
4805 case NE:
4806 case UNORDERED:
4807 return const_true_rtx;
4808 case EQ:
4809 case LT:
4810 case GT:
4811 case LE:
4812 case GE:
4813 case LTGT:
4814 case ORDERED:
4815 return const0_rtx;
4816 default:
4817 return 0;
4820 return comparison_result (code,
4821 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4822 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4825 /* Otherwise, see if the operands are both integers. */
4826 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4827 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4829 /* It would be nice if we really had a mode here. However, the
4830 largest int representable on the target is as good as
4831 infinite. */
4832 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4833 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4834 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4836 if (wi::eq_p (ptrueop0, ptrueop1))
4837 return comparison_result (code, CMP_EQ);
4838 else
4840 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4841 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4842 return comparison_result (code, cr);
4846 /* Optimize comparisons with upper and lower bounds. */
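/* Illustrative example (editor's note, not in the original source): if
   nonzero_bits shows that trueop0 fits in 8 bits, so 0 <= x <= 255, then
   (leu x (const_int 255)) folds to const_true_rtx and
   (gtu x (const_int 255)) folds to const0_rtx.  */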
4847 if (HWI_COMPUTABLE_MODE_P (mode)
4848 && CONST_INT_P (trueop1))
4850 int sign;
4851 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4852 HOST_WIDE_INT val = INTVAL (trueop1);
4853 HOST_WIDE_INT mmin, mmax;
4855 if (code == GEU
4856 || code == LEU
4857 || code == GTU
4858 || code == LTU)
4859 sign = 0;
4860 else
4861 sign = 1;
4863 /* Get a reduced range if the sign bit is zero. */
4864 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4866 mmin = 0;
4867 mmax = nonzero;
4869 else
4871 rtx mmin_rtx, mmax_rtx;
4872 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4874 mmin = INTVAL (mmin_rtx);
4875 mmax = INTVAL (mmax_rtx);
4876 if (sign)
4878 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4880 mmin >>= (sign_copies - 1);
4881 mmax >>= (sign_copies - 1);
4885 switch (code)
4887 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4888 case GEU:
4889 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4890 return const_true_rtx;
4891 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4892 return const0_rtx;
4893 break;
4894 case GE:
4895 if (val <= mmin)
4896 return const_true_rtx;
4897 if (val > mmax)
4898 return const0_rtx;
4899 break;
4901 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4902 case LEU:
4903 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4904 return const_true_rtx;
4905 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4906 return const0_rtx;
4907 break;
4908 case LE:
4909 if (val >= mmax)
4910 return const_true_rtx;
4911 if (val < mmin)
4912 return const0_rtx;
4913 break;
4915 case EQ:
4916 /* x == y is always false for y out of range. */
4917 if (val < mmin || val > mmax)
4918 return const0_rtx;
4919 break;
4921 /* x > y is always false for y >= mmax, always true for y < mmin. */
4922 case GTU:
4923 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4924 return const0_rtx;
4925 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4926 return const_true_rtx;
4927 break;
4928 case GT:
4929 if (val >= mmax)
4930 return const0_rtx;
4931 if (val < mmin)
4932 return const_true_rtx;
4933 break;
4935 /* x < y is always false for y <= mmin, always true for y > mmax. */
4936 case LTU:
4937 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4938 return const0_rtx;
4939 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4940 return const_true_rtx;
4941 break;
4942 case LT:
4943 if (val <= mmin)
4944 return const0_rtx;
4945 if (val > mmax)
4946 return const_true_rtx;
4947 break;
4949 case NE:
4950 /* x != y is always true for y out of range. */
4951 if (val < mmin || val > mmax)
4952 return const_true_rtx;
4953 break;
4955 default:
4956 break;
4960 /* Optimize integer comparisons with zero. */
4961 if (trueop1 == const0_rtx)
4963 /* Some addresses are known to be nonzero. We don't know
4964 their sign, but equality comparisons are known. */
4965 if (nonzero_address_p (trueop0))
4967 if (code == EQ || code == LEU)
4968 return const0_rtx;
4969 if (code == NE || code == GTU)
4970 return const_true_rtx;
4973 /* See if the first operand is an IOR with a constant. If so, we
4974 may be able to determine the result of this comparison. */
4975 if (GET_CODE (op0) == IOR)
4977 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4978 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4980 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4981 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4982 && (UINTVAL (inner_const)
4983 & ((unsigned HOST_WIDE_INT) 1
4984 << sign_bitnum)));
4986 switch (code)
4988 case EQ:
4989 case LEU:
4990 return const0_rtx;
4991 case NE:
4992 case GTU:
4993 return const_true_rtx;
4994 case LT:
4995 case LE:
4996 if (has_sign)
4997 return const_true_rtx;
4998 break;
4999 case GT:
5000 case GE:
5001 if (has_sign)
5002 return const0_rtx;
5003 break;
5004 default:
5005 break;
5011 /* Optimize comparison of ABS with zero. */
5012 if (trueop1 == CONST0_RTX (mode)
5013 && (GET_CODE (trueop0) == ABS
5014 || (GET_CODE (trueop0) == FLOAT_EXTEND
5015 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5017 switch (code)
5019 case LT:
5020 /* Optimize abs(x) < 0.0. */
5021 if (!HONOR_SNANS (mode)
5022 && (!INTEGRAL_MODE_P (mode)
5023 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5025 if (INTEGRAL_MODE_P (mode)
5026 && (issue_strict_overflow_warning
5027 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5028 warning (OPT_Wstrict_overflow,
5029 ("assuming signed overflow does not occur when "
5030 "assuming abs (x) < 0 is false"));
5031 return const0_rtx;
5033 break;
5035 case GE:
5036 /* Optimize abs(x) >= 0.0. */
5037 if (!HONOR_NANS (mode)
5038 && (!INTEGRAL_MODE_P (mode)
5039 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5041 if (INTEGRAL_MODE_P (mode)
5042 && (issue_strict_overflow_warning
5043 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5044 warning (OPT_Wstrict_overflow,
5045 ("assuming signed overflow does not occur when "
5046 "assuming abs (x) >= 0 is true"));
5047 return const_true_rtx;
5049 break;
5051 case UNGE:
5052 /* Optimize ! (abs(x) < 0.0). */
5053 return const_true_rtx;
5055 default:
5056 break;
5060 return 0;
5063 /* Simplify CODE, an operation with result mode MODE and three operands,
5064 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5065 a constant. Return 0 if no simplification is possible. */
5068 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5069 machine_mode op0_mode, rtx op0, rtx op1,
5070 rtx op2)
5072 unsigned int width = GET_MODE_PRECISION (mode);
5073 bool any_change = false;
5074 rtx tem, trueop2;
5076 /* VOIDmode means "infinite" precision. */
5077 if (width == 0)
5078 width = HOST_BITS_PER_WIDE_INT;
5080 switch (code)
5082 case FMA:
5083 /* Simplify negations around the multiplication. */
5084 /* -a * -b + c => a * b + c. */
5085 if (GET_CODE (op0) == NEG)
5087 tem = simplify_unary_operation (NEG, mode, op1, mode);
5088 if (tem)
5089 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5091 else if (GET_CODE (op1) == NEG)
5093 tem = simplify_unary_operation (NEG, mode, op0, mode);
5094 if (tem)
5095 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5098 /* Canonicalize the two multiplication operands. */
5099 /* a * -b + c => -b * a + c. */
5100 if (swap_commutative_operands_p (op0, op1))
5101 tem = op0, op0 = op1, op1 = tem, any_change = true;
5103 if (any_change)
5104 return gen_rtx_FMA (mode, op0, op1, op2);
5105 return NULL_RTX;
5107 case SIGN_EXTRACT:
5108 case ZERO_EXTRACT:
5109 if (CONST_INT_P (op0)
5110 && CONST_INT_P (op1)
5111 && CONST_INT_P (op2)
5112 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5113 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5115 /* Extracting a bit-field from a constant */
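/* Illustrative example (editor's note, not in the original source), with
   !BITS_BIG_ENDIAN: (zero_extract (const_int 0xab) (const_int 4)
   (const_int 4)) extracts bits 4..7 and yields (const_int 0xa); the
   corresponding sign_extract yields (const_int -6).  */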
5116 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5117 HOST_WIDE_INT op1val = INTVAL (op1);
5118 HOST_WIDE_INT op2val = INTVAL (op2);
5119 if (BITS_BIG_ENDIAN)
5120 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5121 else
5122 val >>= op2val;
5124 if (HOST_BITS_PER_WIDE_INT != op1val)
5126 /* First zero-extend. */
5127 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5128 /* If desired, propagate sign bit. */
5129 if (code == SIGN_EXTRACT
5130 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5131 != 0)
5132 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5135 return gen_int_mode (val, mode);
5137 break;
5139 case IF_THEN_ELSE:
5140 if (CONST_INT_P (op0))
5141 return op0 != const0_rtx ? op1 : op2;
5143 /* Convert c ? a : a into "a". */
5144 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5145 return op1;
5147 /* Convert a != b ? a : b into "a". */
5148 if (GET_CODE (op0) == NE
5149 && ! side_effects_p (op0)
5150 && ! HONOR_NANS (mode)
5151 && ! HONOR_SIGNED_ZEROS (mode)
5152 && ((rtx_equal_p (XEXP (op0, 0), op1)
5153 && rtx_equal_p (XEXP (op0, 1), op2))
5154 || (rtx_equal_p (XEXP (op0, 0), op2)
5155 && rtx_equal_p (XEXP (op0, 1), op1))))
5156 return op1;
5158 /* Convert a == b ? a : b into "b". */
5159 if (GET_CODE (op0) == EQ
5160 && ! side_effects_p (op0)
5161 && ! HONOR_NANS (mode)
5162 && ! HONOR_SIGNED_ZEROS (mode)
5163 && ((rtx_equal_p (XEXP (op0, 0), op1)
5164 && rtx_equal_p (XEXP (op0, 1), op2))
5165 || (rtx_equal_p (XEXP (op0, 0), op2)
5166 && rtx_equal_p (XEXP (op0, 1), op1))))
5167 return op2;
5169 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5171 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5172 ? GET_MODE (XEXP (op0, 1))
5173 : GET_MODE (XEXP (op0, 0)));
5174 rtx temp;
5176 /* Look for happy constants in op1 and op2. */
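/* Illustrative example (editor's note, not in the original source), assuming
   STORE_FLAG_VALUE == 1: (if_then_else (lt x y) (const_int 1) (const_int 0))
   becomes (lt x y); with the two constants swapped, the comparison is
   reversed instead.  */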
5177 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5179 HOST_WIDE_INT t = INTVAL (op1);
5180 HOST_WIDE_INT f = INTVAL (op2);
5182 if (t == STORE_FLAG_VALUE && f == 0)
5183 code = GET_CODE (op0);
5184 else if (t == 0 && f == STORE_FLAG_VALUE)
5186 enum rtx_code tmp;
5187 tmp = reversed_comparison_code (op0, NULL_RTX);
5188 if (tmp == UNKNOWN)
5189 break;
5190 code = tmp;
5192 else
5193 break;
5195 return simplify_gen_relational (code, mode, cmp_mode,
5196 XEXP (op0, 0), XEXP (op0, 1));
5199 if (cmp_mode == VOIDmode)
5200 cmp_mode = op0_mode;
5201 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5202 cmp_mode, XEXP (op0, 0),
5203 XEXP (op0, 1));
5205 /* See if any simplifications were possible. */
5206 if (temp)
5208 if (CONST_INT_P (temp))
5209 return temp == const0_rtx ? op2 : op1;
5210 else if (temp)
5211 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5214 break;
5216 case VEC_MERGE:
5217 gcc_assert (GET_MODE (op0) == mode);
5218 gcc_assert (GET_MODE (op1) == mode);
5219 gcc_assert (VECTOR_MODE_P (mode));
5220 trueop2 = avoid_constant_pool_reference (op2);
5221 if (CONST_INT_P (trueop2))
5223 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5224 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5225 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5226 unsigned HOST_WIDE_INT mask;
5227 if (n_elts == HOST_BITS_PER_WIDE_INT)
5228 mask = -1;
5229 else
5230 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5232 if (!(sel & mask) && !side_effects_p (op0))
5233 return op1;
5234 if ((sel & mask) == mask && !side_effects_p (op1))
5235 return op0;
5237 rtx trueop0 = avoid_constant_pool_reference (op0);
5238 rtx trueop1 = avoid_constant_pool_reference (op1);
5239 if (GET_CODE (trueop0) == CONST_VECTOR
5240 && GET_CODE (trueop1) == CONST_VECTOR)
5242 rtvec v = rtvec_alloc (n_elts);
5243 unsigned int i;
5245 for (i = 0; i < n_elts; i++)
5246 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5247 ? CONST_VECTOR_ELT (trueop0, i)
5248 : CONST_VECTOR_ELT (trueop1, i));
5249 return gen_rtx_CONST_VECTOR (mode, v);
5252 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5253 if no element from a appears in the result. */
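/* Illustrative example (editor's note, not in the original source), 4-element
   vectors: in (vec_merge (vec_merge a b 5) c 2) the outer mask selects only
   element 1 of the inner merge, which inner mask 5 takes from b, so the
   whole expression becomes (vec_merge b c 2).  */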
5254 if (GET_CODE (op0) == VEC_MERGE)
5256 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5257 if (CONST_INT_P (tem))
5259 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5260 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5261 return simplify_gen_ternary (code, mode, mode,
5262 XEXP (op0, 1), op1, op2);
5263 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5264 return simplify_gen_ternary (code, mode, mode,
5265 XEXP (op0, 0), op1, op2);
5268 if (GET_CODE (op1) == VEC_MERGE)
5270 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5271 if (CONST_INT_P (tem))
5273 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5274 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5275 return simplify_gen_ternary (code, mode, mode,
5276 op0, XEXP (op1, 1), op2);
5277 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5278 return simplify_gen_ternary (code, mode, mode,
5279 op0, XEXP (op1, 0), op2);
5283 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5284 with a. */
5285 if (GET_CODE (op0) == VEC_DUPLICATE
5286 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5287 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5288 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5290 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5291 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5293 if (XEXP (XEXP (op0, 0), 0) == op1
5294 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5295 return op1;
5300 if (rtx_equal_p (op0, op1)
5301 && !side_effects_p (op2) && !side_effects_p (op1))
5302 return op0;
5304 break;
5306 default:
5307 gcc_unreachable ();
5310 return 0;
5313 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5314 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5315 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5317 Works by unpacking OP into a collection of 8-bit values
5318 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5319 and then repacking them again for OUTERMODE. */
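/* Illustrative example (editor's note, not in the original source): on a
   little-endian target, an HImode subreg at byte 0 of the SImode constant
   0x12345678 unpacks the value into the bytes 78 56 34 12, selects the two
   bytes at offset 0 and repacks them, giving (const_int 0x5678).  */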
5321 static rtx
5322 simplify_immed_subreg (machine_mode outermode, rtx op,
5323 machine_mode innermode, unsigned int byte)
5325 enum {
5326 value_bit = 8,
5327 value_mask = (1 << value_bit) - 1
5329 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5330 int value_start;
5331 int i;
5332 int elem;
5334 int num_elem;
5335 rtx * elems;
5336 int elem_bitsize;
5337 rtx result_s;
5338 rtvec result_v = NULL;
5339 enum mode_class outer_class;
5340 machine_mode outer_submode;
5341 int max_bitsize;
5343 /* Some ports misuse CCmode. */
5344 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5345 return op;
5347 /* We have no way to represent a complex constant at the rtl level. */
5348 if (COMPLEX_MODE_P (outermode))
5349 return NULL_RTX;
5351 /* We support any size mode. */
5352 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5353 GET_MODE_BITSIZE (innermode));
5355 /* Unpack the value. */
5357 if (GET_CODE (op) == CONST_VECTOR)
5359 num_elem = CONST_VECTOR_NUNITS (op);
5360 elems = &CONST_VECTOR_ELT (op, 0);
5361 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5363 else
5365 num_elem = 1;
5366 elems = &op;
5367 elem_bitsize = max_bitsize;
5369 /* If this asserts, it is too complicated; reducing value_bit may help. */
5370 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5371 /* I don't know how to handle endianness of sub-units. */
5372 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5374 for (elem = 0; elem < num_elem; elem++)
5376 unsigned char * vp;
5377 rtx el = elems[elem];
5379 /* Vectors are kept in target memory order. (This is probably
5380 a mistake.) */
5382 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5383 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5384 / BITS_PER_UNIT);
5385 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5386 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5387 unsigned bytele = (subword_byte % UNITS_PER_WORD
5388 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5389 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5392 switch (GET_CODE (el))
5394 case CONST_INT:
5395 for (i = 0;
5396 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5397 i += value_bit)
5398 *vp++ = INTVAL (el) >> i;
5399 /* CONST_INTs are always logically sign-extended. */
5400 for (; i < elem_bitsize; i += value_bit)
5401 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5402 break;
5404 case CONST_WIDE_INT:
5406 rtx_mode_t val = std::make_pair (el, innermode);
5407 unsigned char extend = wi::sign_mask (val);
5409 for (i = 0; i < elem_bitsize; i += value_bit)
5410 *vp++ = wi::extract_uhwi (val, i, value_bit);
5411 for (; i < elem_bitsize; i += value_bit)
5412 *vp++ = extend;
5414 break;
5416 case CONST_DOUBLE:
5417 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5419 unsigned char extend = 0;
5420 /* If this triggers, someone should have generated a
5421 CONST_INT instead. */
5422 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5424 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5425 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5426 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5428 *vp++
5429 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5430 i += value_bit;
5433 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5434 extend = -1;
5435 for (; i < elem_bitsize; i += value_bit)
5436 *vp++ = extend;
5438 else
5440 /* This is big enough for anything on the platform. */
5441 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5442 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5444 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5445 gcc_assert (bitsize <= elem_bitsize);
5446 gcc_assert (bitsize % value_bit == 0);
5448 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5449 GET_MODE (el));
5451 /* real_to_target produces its result in words affected by
5452 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5453 and use WORDS_BIG_ENDIAN instead; see the documentation
5454 of SUBREG in rtl.texi. */
5455 for (i = 0; i < bitsize; i += value_bit)
5457 int ibase;
5458 if (WORDS_BIG_ENDIAN)
5459 ibase = bitsize - 1 - i;
5460 else
5461 ibase = i;
5462 *vp++ = tmp[ibase / 32] >> i % 32;
5465 /* It shouldn't matter what's done here, so fill it with
5466 zero. */
5467 for (; i < elem_bitsize; i += value_bit)
5468 *vp++ = 0;
5470 break;
5472 case CONST_FIXED:
5473 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5475 for (i = 0; i < elem_bitsize; i += value_bit)
5476 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5478 else
5480 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5481 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5482 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5483 i += value_bit)
5484 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5485 >> (i - HOST_BITS_PER_WIDE_INT);
5486 for (; i < elem_bitsize; i += value_bit)
5487 *vp++ = 0;
5489 break;
5491 default:
5492 gcc_unreachable ();
5496 /* Now, pick the right byte to start with. */
5497 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5498 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5499 will already have offset 0. */
5500 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5502 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5503 - byte);
5504 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5505 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5506 byte = (subword_byte % UNITS_PER_WORD
5507 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5510 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5511 so if it's become negative it will instead be very large.) */
5512 gcc_assert (byte < GET_MODE_SIZE (innermode));
5514 /* Convert from bytes to chunks of size value_bit. */
5515 value_start = byte * (BITS_PER_UNIT / value_bit);
5517 /* Re-pack the value. */
5519 if (VECTOR_MODE_P (outermode))
5521 num_elem = GET_MODE_NUNITS (outermode);
5522 result_v = rtvec_alloc (num_elem);
5523 elems = &RTVEC_ELT (result_v, 0);
5524 outer_submode = GET_MODE_INNER (outermode);
5526 else
5528 num_elem = 1;
5529 elems = &result_s;
5530 outer_submode = outermode;
5533 outer_class = GET_MODE_CLASS (outer_submode);
5534 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5536 gcc_assert (elem_bitsize % value_bit == 0);
5537 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5539 for (elem = 0; elem < num_elem; elem++)
5541 unsigned char *vp;
5543 /* Vectors are stored in target memory order. (This is probably
5544 a mistake.) */
5546 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5547 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5548 / BITS_PER_UNIT);
5549 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5550 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5551 unsigned bytele = (subword_byte % UNITS_PER_WORD
5552 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5553 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5556 switch (outer_class)
5558 case MODE_INT:
5559 case MODE_PARTIAL_INT:
5561 int u;
5562 int base = 0;
5563 int units
5564 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5565 / HOST_BITS_PER_WIDE_INT;
5566 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5567 wide_int r;
5569 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5570 return NULL_RTX;
5571 for (u = 0; u < units; u++)
5573 unsigned HOST_WIDE_INT buf = 0;
5574 for (i = 0;
5575 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5576 i += value_bit)
5577 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5579 tmp[u] = buf;
5580 base += HOST_BITS_PER_WIDE_INT;
5582 r = wide_int::from_array (tmp, units,
5583 GET_MODE_PRECISION (outer_submode));
5584 #if TARGET_SUPPORTS_WIDE_INT == 0
5585 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5586 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5587 return NULL_RTX;
5588 #endif
5589 elems[elem] = immed_wide_int_const (r, outer_submode);
5591 break;
5593 case MODE_FLOAT:
5594 case MODE_DECIMAL_FLOAT:
5596 REAL_VALUE_TYPE r;
5597 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5599 /* real_from_target wants its input in words affected by
5600 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5601 and use WORDS_BIG_ENDIAN instead; see the documentation
5602 of SUBREG in rtl.texi. */
5603 for (i = 0; i < max_bitsize / 32; i++)
5604 tmp[i] = 0;
5605 for (i = 0; i < elem_bitsize; i += value_bit)
5607 int ibase;
5608 if (WORDS_BIG_ENDIAN)
5609 ibase = elem_bitsize - 1 - i;
5610 else
5611 ibase = i;
5612 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5615 real_from_target (&r, tmp, outer_submode);
5616 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5618 break;
5620 case MODE_FRACT:
5621 case MODE_UFRACT:
5622 case MODE_ACCUM:
5623 case MODE_UACCUM:
5625 FIXED_VALUE_TYPE f;
5626 f.data.low = 0;
5627 f.data.high = 0;
5628 f.mode = outer_submode;
5630 for (i = 0;
5631 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5632 i += value_bit)
5633 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5634 for (; i < elem_bitsize; i += value_bit)
5635 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5636 << (i - HOST_BITS_PER_WIDE_INT));
5638 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5640 break;
5642 default:
5643 gcc_unreachable ();
5646 if (VECTOR_MODE_P (outermode))
5647 return gen_rtx_CONST_VECTOR (outermode, result_v);
5648 else
5649 return result_s;
5652 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5653 Return 0 if no simplifications are possible. */
5655 simplify_subreg (machine_mode outermode, rtx op,
5656 machine_mode innermode, unsigned int byte)
5658 /* Little bit of sanity checking. */
5659 gcc_assert (innermode != VOIDmode);
5660 gcc_assert (outermode != VOIDmode);
5661 gcc_assert (innermode != BLKmode);
5662 gcc_assert (outermode != BLKmode);
5664 gcc_assert (GET_MODE (op) == innermode
5665 || GET_MODE (op) == VOIDmode);
5667 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5668 return NULL_RTX;
5670 if (byte >= GET_MODE_SIZE (innermode))
5671 return NULL_RTX;
5673 if (outermode == innermode && !byte)
5674 return op;
5676 if (CONST_SCALAR_INT_P (op)
5677 || CONST_DOUBLE_AS_FLOAT_P (op)
5678 || GET_CODE (op) == CONST_FIXED
5679 || GET_CODE (op) == CONST_VECTOR)
5680 return simplify_immed_subreg (outermode, op, innermode, byte);
5682 /* Changing mode twice with SUBREG => just change it once,
5683 or not at all if changing back to op's starting mode. */
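/* Illustrative example (editor's note, not in the original source):
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) collapses to
   (subreg:QI (reg:SI x) 0), and (subreg:SI (subreg:HI (reg:SI x) 0) 0)
   collapses all the way back to (reg:SI x).  */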
5684 if (GET_CODE (op) == SUBREG)
5686 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5687 int final_offset = byte + SUBREG_BYTE (op);
5688 rtx newx;
5690 if (outermode == innermostmode
5691 && byte == 0 && SUBREG_BYTE (op) == 0)
5692 return SUBREG_REG (op);
5694 /* The SUBREG_BYTE represents the offset, as if the value were stored
5695 in memory. An irritating exception is the paradoxical subreg, where
5696 we define SUBREG_BYTE to be 0; on big-endian machines this
5697 value would really be negative. For a moment, undo this exception. */
5698 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5700 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5701 if (WORDS_BIG_ENDIAN)
5702 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5703 if (BYTES_BIG_ENDIAN)
5704 final_offset += difference % UNITS_PER_WORD;
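/* Worked example: simplifying a paradoxical DImode subreg at byte 0
   of OP when OP has SImode, on a big-endian target with 4-byte
   words: difference = 4 - 8 = -4, so final_offset drops by 4 (and
   difference % UNITS_PER_WORD is 0), temporarily undoing the
   convention that a paradoxical SUBREG_BYTE is stored as 0.  */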
5706 if (SUBREG_BYTE (op) == 0
5707 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5709 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5710 if (WORDS_BIG_ENDIAN)
5711 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5712 if (BYTES_BIG_ENDIAN)
5713 final_offset += difference % UNITS_PER_WORD;
5716 /* See whether resulting subreg will be paradoxical. */
5717 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5719 /* In nonparadoxical subregs we can't handle negative offsets. */
5720 if (final_offset < 0)
5721 return NULL_RTX;
5722 /* Bail out in case resulting subreg would be incorrect. */
5723 if (final_offset % GET_MODE_SIZE (outermode)
5724 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5725 return NULL_RTX;
5727 else
5729 int offset = 0;
5730 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5732 /* In a paradoxical subreg, see if we are still looking at the lower
5733 part. If so, our SUBREG_BYTE will be 0. */
5734 if (WORDS_BIG_ENDIAN)
5735 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5736 if (BYTES_BIG_ENDIAN)
5737 offset += difference % UNITS_PER_WORD;
5738 if (offset == final_offset)
5739 final_offset = 0;
5740 else
5741 return NULL_RTX;
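/* E.g. when the innermost register is QImode and OUTERMODE is SImode
   on a big-endian target with 4-byte words, difference = 1 - 4 = -3,
   so the expected lowpart offset is -3; only when final_offset equals
   that value is it canonicalized back to 0.  */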
5744 /* Recurse for further possible simplifications. */
5745 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5746 final_offset);
5747 if (newx)
5748 return newx;
5749 if (validate_subreg (outermode, innermostmode,
5750 SUBREG_REG (op), final_offset))
5752 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5753 if (SUBREG_PROMOTED_VAR_P (op)
5754 && SUBREG_PROMOTED_SIGN (op) >= 0
5755 && GET_MODE_CLASS (outermode) == MODE_INT
5756 && IN_RANGE (GET_MODE_SIZE (outermode),
5757 GET_MODE_SIZE (innermode),
5758 GET_MODE_SIZE (innermostmode))
5759 && subreg_lowpart_p (newx))
5761 SUBREG_PROMOTED_VAR_P (newx) = 1;
5762 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5764 return newx;
5766 return NULL_RTX;
5769 /* SUBREG of a hard register => just change the register number
5770 and/or mode. If the hard register is not valid in that mode,
5771 suppress this simplification. If the hard register is the stack,
5772 frame, or argument pointer, leave this as a SUBREG. */
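/* For instance, on a little-endian target where DImode occupies two
   word-sized hard registers, (subreg:SI (reg:DI 10) 4) can become
   (reg:SI 11), assuming register 11 may hold SImode values (the
   register numbers here are purely illustrative).  */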
5774 if (REG_P (op) && HARD_REGISTER_P (op))
5776 unsigned int regno, final_regno;
5778 regno = REGNO (op);
5779 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5780 if (HARD_REGISTER_NUM_P (final_regno))
5782 rtx x;
5783 int final_offset = byte;
5785 /* Adjust offset for paradoxical subregs. */
5786 if (byte == 0
5787 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5789 int difference = (GET_MODE_SIZE (innermode)
5790 - GET_MODE_SIZE (outermode));
5791 if (WORDS_BIG_ENDIAN)
5792 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5793 if (BYTES_BIG_ENDIAN)
5794 final_offset += difference % UNITS_PER_WORD;
5797 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5799 /* Propagate the original regno. We don't have any way to specify
5800 the offset inside the original regno, so do so only for the lowpart.
5801 The information is used only by alias analysis, which cannot
5802 grok a partial register anyway. */
5804 if (subreg_lowpart_offset (outermode, innermode) == byte)
5805 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5806 return x;
5810 /* If we have a SUBREG of a register that we are replacing and we are
5811 replacing it with a MEM, make a new MEM and try replacing the
5812 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5813 or if we would be widening it. */
5815 if (MEM_P (op)
5816 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5817 /* Allow splitting of volatile memory references in case we don't
5818 have an instruction to move the whole thing. */
5819 && (! MEM_VOLATILE_P (op)
5820 || ! have_insn_for (SET, innermode))
5821 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5822 return adjust_address_nv (op, outermode, byte);
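/* Roughly, a QImode subreg at byte 3 of (mem:SI ADDR) becomes
   (mem:QI (plus ADDR (const_int 3))), since SUBREG_BYTE already
   counts bytes in memory order; ADDR stands for an arbitrary valid
   address.  */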
5824 /* Handle complex values represented as CONCAT
5825 of real and imaginary part. */
5826 if (GET_CODE (op) == CONCAT)
5828 unsigned int part_size, final_offset;
5829 rtx part, res;
5831 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5832 if (byte < part_size)
5834 part = XEXP (op, 0);
5835 final_offset = byte;
5837 else
5839 part = XEXP (op, 1);
5840 final_offset = byte - part_size;
5843 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5844 return NULL_RTX;
5846 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5847 if (res)
5848 return res;
5849 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5850 return gen_rtx_SUBREG (outermode, part, final_offset);
5851 return NULL_RTX;
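/* As an illustration, (subreg:DF (concat:DC X Y) 8) selects Y: each
   DFmode part is 8 bytes, so byte 8 falls in the second part at
   offset 0 (X and Y are placeholders for the real and imaginary
   parts).  */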
5854 /* A SUBREG resulting from a zero extension may fold to zero if
5855 it extracts higher bits than the ZERO_EXTEND's source bits. */
5856 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5858 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5859 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5860 return CONST0_RTX (outermode);
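/* Example: on a little-endian target, (subreg:SI (zero_extend:DI
   (reg:SI X)) 4), for any SImode register X, reads only bits created
   by the extension, so it folds to (const_int 0).  */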
5863 if (SCALAR_INT_MODE_P (outermode)
5864 && SCALAR_INT_MODE_P (innermode)
5865 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5866 && byte == subreg_lowpart_offset (outermode, innermode))
5868 rtx tem = simplify_truncation (outermode, op, innermode);
5869 if (tem)
5870 return tem;
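/* A lowpart subreg to a narrower integer mode behaves like a
   truncation here; e.g. (subreg:QI (sign_extend:SI (reg:QI X)) 0) on
   a little-endian target can fold back to (reg:QI X).  */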
5873 return NULL_RTX;
5876 /* Make a SUBREG operation or equivalent if it folds. */
5878 rtx
5879 simplify_gen_subreg (machine_mode outermode, rtx op,
5880 machine_mode innermode, unsigned int byte)
5882 rtx newx;
5884 newx = simplify_subreg (outermode, op, innermode, byte);
5885 if (newx)
5886 return newx;
5888 if (GET_CODE (op) == SUBREG
5889 || GET_CODE (op) == CONCAT
5890 || GET_MODE (op) == VOIDmode)
5891 return NULL_RTX;
5893 if (validate_subreg (outermode, innermode, op, byte))
5894 return gen_rtx_SUBREG (outermode, op, byte);
5896 return NULL_RTX;
5899 /* Simplify X, an rtx expression.
5901 Return the simplified expression or NULL if no simplifications
5902 were possible.
5904 This is the preferred entry point into the simplification routines;
5905 however, we still allow passes to call the more specific routines.
5907 Right now GCC has three (yes, three) major bodies of RTL simplification
5908 code that need to be unified.
5910 1. fold_rtx in cse.c. This code uses various CSE specific
5911 information to aid in RTL simplification.
5913 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5914 it uses combine specific information to aid in RTL
5915 simplification.
5917 3. The routines in this file.
5920 Long term we want to only have one body of simplification code; to
5921 get to that state I recommend the following steps:
5923 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5924 which do not depend on pass-specific state into these routines.
5926 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5927 use this routine whenever possible.
5929 3. Allow for pass dependent state to be provided to these
5930 routines and add simplifications based on the pass dependent
5931 state. Remove code from cse.c & combine.c that becomes
5932 redundant/dead.
5934 It will take time, but ultimately the compiler will be easier to
5935 maintain and improve. It's totally silly that when we add a
5936 simplification it needs to be added to 4 places (3 for RTL
5937 simplification and 1 for tree simplification). */
5939 rtx
5940 simplify_rtx (const_rtx x)
5942 const enum rtx_code code = GET_CODE (x);
5943 const machine_mode mode = GET_MODE (x);
5945 switch (GET_RTX_CLASS (code))
5947 case RTX_UNARY:
5948 return simplify_unary_operation (code, mode,
5949 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5950 case RTX_COMM_ARITH:
5951 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5952 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
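/* For example, (plus:SI (const_int 4) (reg:SI X)) is reordered to
   (plus:SI (reg:SI X) (const_int 4)) so that the canonical operand
   order is restored before simplification.  */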
5954 /* Fall through.... */
5956 case RTX_BIN_ARITH:
5957 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5959 case RTX_TERNARY:
5960 case RTX_BITFIELD_OPS:
5961 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5962 XEXP (x, 0), XEXP (x, 1),
5963 XEXP (x, 2));
5965 case RTX_COMPARE:
5966 case RTX_COMM_COMPARE:
5967 return simplify_relational_operation (code, mode,
5968 ((GET_MODE (XEXP (x, 0))
5969 != VOIDmode)
5970 ? GET_MODE (XEXP (x, 0))
5971 : GET_MODE (XEXP (x, 1))),
5972 XEXP (x, 0),
5973 XEXP (x, 1));
5975 case RTX_EXTRA:
5976 if (code == SUBREG)
5977 return simplify_subreg (mode, SUBREG_REG (x),
5978 GET_MODE (SUBREG_REG (x)),
5979 SUBREG_BYTE (x));
5980 break;
5982 case RTX_OBJ:
5983 if (code == LO_SUM)
5985 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5986 if (GET_CODE (XEXP (x, 0)) == HIGH
5987 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5988 return XEXP (x, 1);
5990 break;
5992 default:
5993 break;
5995 return NULL;