Provenance: gcc/simplify-rtx.c, taken from the official GCC git mirror
(blob 9f6dbe119c67416f7fadfe533b5af646873b8c5e).
Nearby ChangeLog entry: 2014-08-04  Ed Schonberg  <schonberg@adacore.com>
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "ggc.h"
38 #include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  The argument is parenthesized so that expression
   arguments (e.g. "a + b") are cast as a whole, not just their first
   operand.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
49 static rtx neg_const_int (enum machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
52 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
53 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
54 unsigned int);
55 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
56 rtx, rtx);
57 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
58 enum machine_mode, rtx, rtx);
59 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
60 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
61 rtx, rtx, rtx, rtx);
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, const_rtx i)
68 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
74 bool
75 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
83 width = GET_MODE_PRECISION (mode);
84 if (width == 0)
85 return false;
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && CONST_INT_P (x))
89 val = INTVAL (x);
90 #if TARGET_SUPPORTS_WIDE_INT
91 else if (CONST_WIDE_INT_P (x))
93 unsigned int i;
94 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
95 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
96 return false;
97 for (i = 0; i < elts - 1; i++)
98 if (CONST_WIDE_INT_ELT (x, i) != 0)
99 return false;
100 val = CONST_WIDE_INT_ELT (x, elts - 1);
101 width %= HOST_BITS_PER_WIDE_INT;
102 if (width == 0)
103 width = HOST_BITS_PER_WIDE_INT;
105 #else
106 else if (width <= HOST_BITS_PER_DOUBLE_INT
107 && CONST_DOUBLE_AS_INT_P (x)
108 && CONST_DOUBLE_LOW (x) == 0)
110 val = CONST_DOUBLE_HIGH (x);
111 width -= HOST_BITS_PER_WIDE_INT;
113 #endif
114 else
115 /* X is not an integer constant. */
116 return false;
118 if (width < HOST_BITS_PER_WIDE_INT)
119 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
120 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
123 /* Test whether VAL is equal to the most significant bit of mode MODE
124 (after masking with the mode mask of MODE). Returns false if the
125 precision of MODE is too large to handle. */
127 bool
128 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= GET_MODE_MASK (mode);
140 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
143 /* Test whether the most significant bit of mode MODE is set in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val != 0;
161 /* Test whether the most significant bit of mode MODE is clear in VAL.
162 Returns false if the precision of MODE is too large to handle. */
163 bool
164 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
166 unsigned int width;
168 if (GET_MODE_CLASS (mode) != MODE_INT)
169 return false;
171 width = GET_MODE_PRECISION (mode);
172 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
173 return false;
175 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
176 return val == 0;
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
183 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
184 rtx op1)
186 rtx tem;
188 /* If this simplifies, do it. */
189 tem = simplify_binary_operation (code, mode, op0, op1);
190 if (tem)
191 return tem;
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0, op1))
196 tem = op0, op0 = op1, op1 = tem;
198 return gen_rtx_fmt_ee (code, mode, op0, op1);
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
204 avoid_constant_pool_reference (rtx x)
206 rtx c, tmp, addr;
207 enum machine_mode cmode;
208 HOST_WIDE_INT offset = 0;
210 switch (GET_CODE (x))
212 case MEM:
213 break;
215 case FLOAT_EXTEND:
216 /* Handle float extensions of constant pool references. */
217 tmp = XEXP (x, 0);
218 c = avoid_constant_pool_reference (tmp);
219 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 REAL_VALUE_TYPE d;
223 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
224 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
226 return x;
228 default:
229 return x;
232 if (GET_MODE (x) == BLKmode)
233 return x;
235 addr = XEXP (x, 0);
237 /* Call target hook to avoid the effects of -fpic etc.... */
238 addr = targetm.delegitimize_address (addr);
240 /* Split the address into a base and integer offset. */
241 if (GET_CODE (addr) == CONST
242 && GET_CODE (XEXP (addr, 0)) == PLUS
243 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
246 addr = XEXP (XEXP (addr, 0), 0);
249 if (GET_CODE (addr) == LO_SUM)
250 addr = XEXP (addr, 1);
252 /* If this is a constant pool reference, we can turn it into its
253 constant and hope that simplifications happen. */
254 if (GET_CODE (addr) == SYMBOL_REF
255 && CONSTANT_POOL_ADDRESS_P (addr))
257 c = get_pool_constant (addr);
258 cmode = get_pool_mode (addr);
260 /* If we're accessing the constant in a different mode than it was
261 originally stored, attempt to fix that up via subreg simplifications.
262 If that fails we have no choice but to return the original memory. */
263 if ((offset != 0 || cmode != GET_MODE (x))
264 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
266 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
267 if (tem && CONSTANT_P (tem))
268 return tem;
270 else
271 return c;
274 return x;
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 enum machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, volatilep = 0;
315 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
316 &mode, &unsignedp, &volatilep, false);
317 if (bitsize != GET_MODE_BITSIZE (mode)
318 || (bitpos % BITS_PER_UNIT)
319 || (toffset && !tree_fits_shwi_p (toffset)))
320 decl = NULL;
321 else
323 offset += bitpos / BITS_PER_UNIT;
324 if (toffset)
325 offset += tree_to_shwi (toffset);
327 break;
331 if (decl
332 && mode == GET_MODE (x)
333 && TREE_CODE (decl) == VAR_DECL
334 && (TREE_STATIC (decl)
335 || DECL_THREAD_LOCAL_P (decl))
336 && DECL_RTL_SET_P (decl)
337 && MEM_P (DECL_RTL (decl)))
339 rtx newx;
341 offset += MEM_OFFSET (x);
343 newx = DECL_RTL (decl);
345 if (MEM_P (newx))
347 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349 /* Avoid creating a new MEM needlessly if we already had
350 the same address. We do if there's no OFFSET and the
351 old address X is identical to NEWX, or if X is of the
352 form (plus NEWX OFFSET), or the NEWX is of the form
353 (plus Y (const_int Z)) and X is that with the offset
354 added: (plus Y (const_int Z+OFFSET)). */
355 if (!((offset == 0
356 || (GET_CODE (o) == PLUS
357 && GET_CODE (XEXP (o, 1)) == CONST_INT
358 && (offset == INTVAL (XEXP (o, 1))
359 || (GET_CODE (n) == PLUS
360 && GET_CODE (XEXP (n, 1)) == CONST_INT
361 && (INTVAL (XEXP (n, 1)) + offset
362 == INTVAL (XEXP (o, 1)))
363 && (n = XEXP (n, 0))))
364 && (o = XEXP (o, 0))))
365 && rtx_equal_p (o, n)))
366 x = adjust_address_nv (newx, mode, offset);
368 else if (GET_MODE (x) == GET_MODE (newx)
369 && offset == 0)
370 x = newx;
374 return x;
377 /* Make a unary operation by first seeing if it folds and otherwise making
378 the specified operation. */
381 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
382 enum machine_mode op_mode)
384 rtx tem;
386 /* If this simplifies, use it. */
387 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
388 return tem;
390 return gen_rtx_fmt_e (code, mode, op);
393 /* Likewise for ternary operations. */
396 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
399 rtx tem;
401 /* If this simplifies, use it. */
402 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
403 op0, op1, op2)))
404 return tem;
406 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
409 /* Likewise, for relational operations.
410 CMP_MODE specifies mode comparison is done in. */
413 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
414 enum machine_mode cmp_mode, rtx op0, rtx op1)
416 rtx tem;
418 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
419 op0, op1)))
420 return tem;
422 return gen_rtx_fmt_ee (code, mode, op0, op1);
425 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
426 and simplify the result. If FN is non-NULL, call this callback on each
427 X, if it returns non-NULL, replace X with its return value and simplify the
428 result. */
431 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
432 rtx (*fn) (rtx, const_rtx, void *), void *data)
434 enum rtx_code code = GET_CODE (x);
435 enum machine_mode mode = GET_MODE (x);
436 enum machine_mode op_mode;
437 const char *fmt;
438 rtx op0, op1, op2, newx, op;
439 rtvec vec, newvec;
440 int i, j;
442 if (__builtin_expect (fn != NULL, 0))
444 newx = fn (x, old_rtx, data);
445 if (newx)
446 return newx;
448 else if (rtx_equal_p (x, old_rtx))
449 return copy_rtx ((rtx) data);
451 switch (GET_RTX_CLASS (code))
453 case RTX_UNARY:
454 op0 = XEXP (x, 0);
455 op_mode = GET_MODE (op0);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 if (op0 == XEXP (x, 0))
458 return x;
459 return simplify_gen_unary (code, mode, op0, op_mode);
461 case RTX_BIN_ARITH:
462 case RTX_COMM_ARITH:
463 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
464 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
465 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
466 return x;
467 return simplify_gen_binary (code, mode, op0, op1);
469 case RTX_COMPARE:
470 case RTX_COMM_COMPARE:
471 op0 = XEXP (x, 0);
472 op1 = XEXP (x, 1);
473 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
474 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
475 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
476 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
477 return x;
478 return simplify_gen_relational (code, mode, op_mode, op0, op1);
480 case RTX_TERNARY:
481 case RTX_BITFIELD_OPS:
482 op0 = XEXP (x, 0);
483 op_mode = GET_MODE (op0);
484 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
485 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
486 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
487 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
488 return x;
489 if (op_mode == VOIDmode)
490 op_mode = GET_MODE (op0);
491 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
493 case RTX_EXTRA:
494 if (code == SUBREG)
496 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
497 if (op0 == SUBREG_REG (x))
498 return x;
499 op0 = simplify_gen_subreg (GET_MODE (x), op0,
500 GET_MODE (SUBREG_REG (x)),
501 SUBREG_BYTE (x));
502 return op0 ? op0 : x;
504 break;
506 case RTX_OBJ:
507 if (code == MEM)
509 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
510 if (op0 == XEXP (x, 0))
511 return x;
512 return replace_equiv_address_nv (x, op0);
514 else if (code == LO_SUM)
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
519 /* (lo_sum (high x) x) -> x */
520 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
521 return op1;
523 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
524 return x;
525 return gen_rtx_LO_SUM (mode, op0, op1);
527 break;
529 default:
530 break;
533 newx = x;
534 fmt = GET_RTX_FORMAT (code);
535 for (i = 0; fmt[i]; i++)
536 switch (fmt[i])
538 case 'E':
539 vec = XVEC (x, i);
540 newvec = XVEC (newx, i);
541 for (j = 0; j < GET_NUM_ELEM (vec); j++)
543 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
544 old_rtx, fn, data);
545 if (op != RTVEC_ELT (vec, j))
547 if (newvec == vec)
549 newvec = shallow_copy_rtvec (vec);
550 if (x == newx)
551 newx = shallow_copy_rtx (x);
552 XVEC (newx, i) = newvec;
554 RTVEC_ELT (newvec, j) = op;
557 break;
559 case 'e':
560 if (XEXP (x, i))
562 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
563 if (op != XEXP (x, i))
565 if (x == newx)
566 newx = shallow_copy_rtx (x);
567 XEXP (newx, i) = op;
570 break;
572 return newx;
575 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
576 resulting RTX. Return a new RTX which is as simplified as possible. */
579 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
581 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
584 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
585 Only handle cases where the truncated value is inherently an rvalue.
587 RTL provides two ways of truncating a value:
589 1. a lowpart subreg. This form is only a truncation when both
590 the outer and inner modes (here MODE and OP_MODE respectively)
591 are scalar integers, and only then when the subreg is used as
592 an rvalue.
594 It is only valid to form such truncating subregs if the
595 truncation requires no action by the target. The onus for
596 proving this is on the creator of the subreg -- e.g. the
597 caller to simplify_subreg or simplify_gen_subreg -- and typically
598 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
600 2. a TRUNCATE. This form handles both scalar and compound integers.
602 The first form is preferred where valid. However, the TRUNCATE
603 handling in simplify_unary_operation turns the second form into the
604 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
605 so it is generally safe to form rvalue truncations using:
607 simplify_gen_unary (TRUNCATE, ...)
609 and leave simplify_unary_operation to work out which representation
610 should be used.
612 Because of the proof requirements on (1), simplify_truncation must
613 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
614 regardless of whether the outer truncation came from a SUBREG or a
615 TRUNCATE. For example, if the caller has proven that an SImode
616 truncation of:
618 (and:DI X Y)
620 is a no-op and can be represented as a subreg, it does not follow
621 that SImode truncations of X and Y are also no-ops. On a target
622 like 64-bit MIPS that requires SImode values to be stored in
623 sign-extended form, an SImode truncation of:
625 (and:DI (reg:DI X) (const_int 63))
627 is trivially a no-op because only the lower 6 bits can be set.
628 However, X is still an arbitrary 64-bit number and so we cannot
629 assume that truncating it too is a no-op. */
631 static rtx
632 simplify_truncation (enum machine_mode mode, rtx op,
633 enum machine_mode op_mode)
635 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
636 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
637 gcc_assert (precision <= op_precision);
639 /* Optimize truncations of zero and sign extended values. */
640 if (GET_CODE (op) == ZERO_EXTEND
641 || GET_CODE (op) == SIGN_EXTEND)
643 /* There are three possibilities. If MODE is the same as the
644 origmode, we can omit both the extension and the subreg.
645 If MODE is not larger than the origmode, we can apply the
646 truncation without the extension. Finally, if the outermode
647 is larger than the origmode, we can just extend to the appropriate
648 mode. */
649 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
650 if (mode == origmode)
651 return XEXP (op, 0);
652 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
653 return simplify_gen_unary (TRUNCATE, mode,
654 XEXP (op, 0), origmode);
655 else
656 return simplify_gen_unary (GET_CODE (op), mode,
657 XEXP (op, 0), origmode);
660 /* If the machine can perform operations in the truncated mode, distribute
661 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
662 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
663 if (1
664 #ifdef WORD_REGISTER_OPERATIONS
665 && precision >= BITS_PER_WORD
666 #endif
667 && (GET_CODE (op) == PLUS
668 || GET_CODE (op) == MINUS
669 || GET_CODE (op) == MULT))
671 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
672 if (op0)
674 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
675 if (op1)
676 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
680 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
681 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
682 the outer subreg is effectively a truncation to the original mode. */
683 if ((GET_CODE (op) == LSHIFTRT
684 || GET_CODE (op) == ASHIFTRT)
685 /* Ensure that OP_MODE is at least twice as wide as MODE
686 to avoid the possibility that an outer LSHIFTRT shifts by more
687 than the sign extension's sign_bit_copies and introduces zeros
688 into the high bits of the result. */
689 && 2 * precision <= op_precision
690 && CONST_INT_P (XEXP (op, 1))
691 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
692 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
693 && UINTVAL (XEXP (op, 1)) < precision)
694 return simplify_gen_binary (ASHIFTRT, mode,
695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
698 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
699 the outer subreg is effectively a truncation to the original mode. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && CONST_INT_P (XEXP (op, 1))
703 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
704 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
705 && UINTVAL (XEXP (op, 1)) < precision)
706 return simplify_gen_binary (LSHIFTRT, mode,
707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
710 to (ashift:QI (x:QI) C), where C is a suitable small constant and
711 the outer subreg is effectively a truncation to the original mode. */
712 if (GET_CODE (op) == ASHIFT
713 && CONST_INT_P (XEXP (op, 1))
714 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
717 && UINTVAL (XEXP (op, 1)) < precision)
718 return simplify_gen_binary (ASHIFT, mode,
719 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721 /* Recognize a word extraction from a multi-word subreg. */
722 if ((GET_CODE (op) == LSHIFTRT
723 || GET_CODE (op) == ASHIFTRT)
724 && SCALAR_INT_MODE_P (mode)
725 && SCALAR_INT_MODE_P (op_mode)
726 && precision >= BITS_PER_WORD
727 && 2 * precision <= op_precision
728 && CONST_INT_P (XEXP (op, 1))
729 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
730 && UINTVAL (XEXP (op, 1)) < op_precision)
732 int byte = subreg_lowpart_offset (mode, op_mode);
733 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
734 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
735 (WORDS_BIG_ENDIAN
736 ? byte - shifted_bytes
737 : byte + shifted_bytes));
740 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
741 and try replacing the TRUNCATE and shift with it. Don't do this
742 if the MEM has a mode-dependent address. */
743 if ((GET_CODE (op) == LSHIFTRT
744 || GET_CODE (op) == ASHIFTRT)
745 && SCALAR_INT_MODE_P (op_mode)
746 && MEM_P (XEXP (op, 0))
747 && CONST_INT_P (XEXP (op, 1))
748 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
749 && INTVAL (XEXP (op, 1)) > 0
750 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
751 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
752 MEM_ADDR_SPACE (XEXP (op, 0)))
753 && ! MEM_VOLATILE_P (XEXP (op, 0))
754 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
755 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
757 int byte = subreg_lowpart_offset (mode, op_mode);
758 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
759 return adjust_address_nv (XEXP (op, 0), mode,
760 (WORDS_BIG_ENDIAN
761 ? byte - shifted_bytes
762 : byte + shifted_bytes));
765 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
766 (OP:SI foo:SI) if OP is NEG or ABS. */
767 if ((GET_CODE (op) == ABS
768 || GET_CODE (op) == NEG)
769 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
770 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
771 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
772 return simplify_gen_unary (GET_CODE (op), mode,
773 XEXP (XEXP (op, 0), 0), mode);
775 /* (truncate:A (subreg:B (truncate:C X) 0)) is
776 (truncate:A X). */
777 if (GET_CODE (op) == SUBREG
778 && SCALAR_INT_MODE_P (mode)
779 && SCALAR_INT_MODE_P (op_mode)
780 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
781 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
782 && subreg_lowpart_p (op))
784 rtx inner = XEXP (SUBREG_REG (op), 0);
785 if (GET_MODE_PRECISION (mode)
786 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
787 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
788 else
789 /* If subreg above is paradoxical and C is narrower
790 than A, return (subreg:A (truncate:C X) 0). */
791 return simplify_gen_subreg (mode, SUBREG_REG (op),
792 GET_MODE (SUBREG_REG (op)), 0);
795 /* (truncate:A (truncate:B X)) is (truncate:A X). */
796 if (GET_CODE (op) == TRUNCATE)
797 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
798 GET_MODE (XEXP (op, 0)));
800 return NULL_RTX;
803 /* Try to simplify a unary operation CODE whose output mode is to be
804 MODE with input operand OP whose mode was originally OP_MODE.
805 Return zero if no simplification can be made. */
807 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
808 rtx op, enum machine_mode op_mode)
810 rtx trueop, tem;
812 trueop = avoid_constant_pool_reference (op);
814 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
815 if (tem)
816 return tem;
818 return simplify_unary_operation_1 (code, mode, op);
821 /* Perform some simplifications we can do even if the operands
822 aren't constant. */
823 static rtx
824 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
826 enum rtx_code reversed;
827 rtx temp;
829 switch (code)
831 case NOT:
832 /* (not (not X)) == X. */
833 if (GET_CODE (op) == NOT)
834 return XEXP (op, 0);
836 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
837 comparison is all ones. */
838 if (COMPARISON_P (op)
839 && (mode == BImode || STORE_FLAG_VALUE == -1)
840 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
841 return simplify_gen_relational (reversed, mode, VOIDmode,
842 XEXP (op, 0), XEXP (op, 1));
844 /* (not (plus X -1)) can become (neg X). */
845 if (GET_CODE (op) == PLUS
846 && XEXP (op, 1) == constm1_rtx)
847 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
849 /* Similarly, (not (neg X)) is (plus X -1). */
850 if (GET_CODE (op) == NEG)
851 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
852 CONSTM1_RTX (mode));
854 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
855 if (GET_CODE (op) == XOR
856 && CONST_INT_P (XEXP (op, 1))
857 && (temp = simplify_unary_operation (NOT, mode,
858 XEXP (op, 1), mode)) != 0)
859 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
861 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
862 if (GET_CODE (op) == PLUS
863 && CONST_INT_P (XEXP (op, 1))
864 && mode_signbit_p (mode, XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
870 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
871 operands other than 1, but that is not valid. We could do a
872 similar simplification for (not (lshiftrt C X)) where C is
873 just the sign bit, but this doesn't seem common enough to
874 bother with. */
875 if (GET_CODE (op) == ASHIFT
876 && XEXP (op, 0) == const1_rtx)
878 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
879 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
882 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
883 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
884 so we can perform the above simplification. */
885 if (STORE_FLAG_VALUE == -1
886 && GET_CODE (op) == ASHIFTRT
887 && CONST_INT_P (XEXP (op, 1))
888 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
889 return simplify_gen_relational (GE, mode, VOIDmode,
890 XEXP (op, 0), const0_rtx);
893 if (GET_CODE (op) == SUBREG
894 && subreg_lowpart_p (op)
895 && (GET_MODE_SIZE (GET_MODE (op))
896 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
897 && GET_CODE (SUBREG_REG (op)) == ASHIFT
898 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
900 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
901 rtx x;
903 x = gen_rtx_ROTATE (inner_mode,
904 simplify_gen_unary (NOT, inner_mode, const1_rtx,
905 inner_mode),
906 XEXP (SUBREG_REG (op), 1));
907 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
908 if (temp)
909 return temp;
912 /* Apply De Morgan's laws to reduce number of patterns for machines
913 with negating logical insns (and-not, nand, etc.). If result has
914 only one NOT, put it first, since that is how the patterns are
915 coded. */
916 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
918 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
919 enum machine_mode op_mode;
921 op_mode = GET_MODE (in1);
922 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
924 op_mode = GET_MODE (in2);
925 if (op_mode == VOIDmode)
926 op_mode = mode;
927 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
929 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
931 rtx tem = in2;
932 in2 = in1; in1 = tem;
935 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
936 mode, in1, in2);
939 /* (not (bswap x)) -> (bswap (not x)). */
940 if (GET_CODE (op) == BSWAP)
942 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
943 return simplify_gen_unary (BSWAP, mode, x, mode);
945 break;
947 case NEG:
948 /* (neg (neg X)) == X. */
949 if (GET_CODE (op) == NEG)
950 return XEXP (op, 0);
952 /* (neg (plus X 1)) can become (not X). */
953 if (GET_CODE (op) == PLUS
954 && XEXP (op, 1) == const1_rtx)
955 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
957 /* Similarly, (neg (not X)) is (plus X 1). */
958 if (GET_CODE (op) == NOT)
959 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
960 CONST1_RTX (mode));
962 /* (neg (minus X Y)) can become (minus Y X). This transformation
963 isn't safe for modes with signed zeros, since if X and Y are
964 both +0, (minus Y X) is the same as (minus X Y). If the
965 rounding mode is towards +infinity (or -infinity) then the two
966 expressions will be rounded differently. */
967 if (GET_CODE (op) == MINUS
968 && !HONOR_SIGNED_ZEROS (mode)
969 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
972 if (GET_CODE (op) == PLUS
973 && !HONOR_SIGNED_ZEROS (mode)
974 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
976 /* (neg (plus A C)) is simplified to (minus -C A). */
977 if (CONST_SCALAR_INT_P (XEXP (op, 1))
978 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
980 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
981 if (temp)
982 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
985 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
986 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
987 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
990 /* (neg (mult A B)) becomes (mult A (neg B)).
991 This works even for floating-point values. */
992 if (GET_CODE (op) == MULT
993 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
995 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
996 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
999 /* NEG commutes with ASHIFT since it is multiplication. Only do
1000 this if we can then eliminate the NEG (e.g., if the operand
1001 is a constant). */
1002 if (GET_CODE (op) == ASHIFT)
1004 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1005 if (temp)
1006 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1009 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1010 C is equal to the width of MODE minus 1. */
1011 if (GET_CODE (op) == ASHIFTRT
1012 && CONST_INT_P (XEXP (op, 1))
1013 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1014 return simplify_gen_binary (LSHIFTRT, mode,
1015 XEXP (op, 0), XEXP (op, 1));
1017 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == LSHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (ASHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1026 if (GET_CODE (op) == XOR
1027 && XEXP (op, 1) == const1_rtx
1028 && nonzero_bits (XEXP (op, 0), mode) == 1)
1029 return plus_constant (mode, XEXP (op, 0), -1);
1031 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1032 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1033 if (GET_CODE (op) == LT
1034 && XEXP (op, 1) == const0_rtx
1035 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1037 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1038 int isize = GET_MODE_PRECISION (inner);
1039 if (STORE_FLAG_VALUE == 1)
1041 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1042 GEN_INT (isize - 1));
1043 if (mode == inner)
1044 return temp;
1045 if (GET_MODE_PRECISION (mode) > isize)
1046 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1047 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1049 else if (STORE_FLAG_VALUE == -1)
1051 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1052 GEN_INT (isize - 1));
1053 if (mode == inner)
1054 return temp;
1055 if (GET_MODE_PRECISION (mode) > isize)
1056 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1057 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1060 break;
1062 case TRUNCATE:
1063 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1064 with the umulXi3_highpart patterns. */
1065 if (GET_CODE (op) == LSHIFTRT
1066 && GET_CODE (XEXP (op, 0)) == MULT)
1067 break;
1069 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1071 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1073 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 if (temp)
1075 return temp;
1077 /* We can't handle truncation to a partial integer mode here
1078 because we don't know the real bitsize of the partial
1079 integer mode. */
1080 break;
1083 if (GET_MODE (op) != VOIDmode)
1085 temp = simplify_truncation (mode, op, GET_MODE (op));
1086 if (temp)
1087 return temp;
1090 /* If we know that the value is already truncated, we can
1091 replace the TRUNCATE with a SUBREG. */
1092 if (GET_MODE_NUNITS (mode) == 1
1093 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1094 || truncated_to_mode (mode, op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 /* A truncate of a comparison can be replaced with a subreg if
1102 STORE_FLAG_VALUE permits. This is like the previous test,
1103 but it works even if the comparison is done in a mode larger
1104 than HOST_BITS_PER_WIDE_INT. */
1105 if (HWI_COMPUTABLE_MODE_P (mode)
1106 && COMPARISON_P (op)
1107 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1109 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1110 if (temp)
1111 return temp;
1114 /* A truncate of a memory is just loading the low part of the memory
1115 if we are not changing the meaning of the address. */
1116 if (GET_CODE (op) == MEM
1117 && !VECTOR_MODE_P (mode)
1118 && !MEM_VOLATILE_P (op)
1119 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1121 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1122 if (temp)
1123 return temp;
1126 break;
1128 case FLOAT_TRUNCATE:
1129 if (DECIMAL_FLOAT_MODE_P (mode))
1130 break;
1132 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1133 if (GET_CODE (op) == FLOAT_EXTEND
1134 && GET_MODE (XEXP (op, 0)) == mode)
1135 return XEXP (op, 0);
1137 /* (float_truncate:SF (float_truncate:DF foo:XF))
1138 = (float_truncate:SF foo:XF).
1139 This may eliminate double rounding, so it is unsafe.
1141 (float_truncate:SF (float_extend:XF foo:DF))
1142 = (float_truncate:SF foo:DF).
1144 (float_truncate:DF (float_extend:XF foo:SF))
1145 = (float_extend:DF foo:SF). */
1146 if ((GET_CODE (op) == FLOAT_TRUNCATE
1147 && flag_unsafe_math_optimizations)
1148 || GET_CODE (op) == FLOAT_EXTEND)
1149 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1150 0)))
1151 > GET_MODE_SIZE (mode)
1152 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1153 mode,
1154 XEXP (op, 0), mode);
1156 /* (float_truncate (float x)) is (float x) */
1157 if (GET_CODE (op) == FLOAT
1158 && (flag_unsafe_math_optimizations
1159 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1160 && ((unsigned)significand_size (GET_MODE (op))
1161 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1162 - num_sign_bit_copies (XEXP (op, 0),
1163 GET_MODE (XEXP (op, 0))))))))
1164 return simplify_gen_unary (FLOAT, mode,
1165 XEXP (op, 0),
1166 GET_MODE (XEXP (op, 0)));
1168 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1169 (OP:SF foo:SF) if OP is NEG or ABS. */
1170 if ((GET_CODE (op) == ABS
1171 || GET_CODE (op) == NEG)
1172 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1173 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1174 return simplify_gen_unary (GET_CODE (op), mode,
1175 XEXP (XEXP (op, 0), 0), mode);
1177 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1178 is (float_truncate:SF x). */
1179 if (GET_CODE (op) == SUBREG
1180 && subreg_lowpart_p (op)
1181 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1182 return SUBREG_REG (op);
1183 break;
1185 case FLOAT_EXTEND:
1186 if (DECIMAL_FLOAT_MODE_P (mode))
1187 break;
1189 /* (float_extend (float_extend x)) is (float_extend x)
1191 (float_extend (float x)) is (float x) assuming that double
1192 rounding can't happen.
1194 if (GET_CODE (op) == FLOAT_EXTEND
1195 || (GET_CODE (op) == FLOAT
1196 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1197 && ((unsigned)significand_size (GET_MODE (op))
1198 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1199 - num_sign_bit_copies (XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)))))))
1201 return simplify_gen_unary (GET_CODE (op), mode,
1202 XEXP (op, 0),
1203 GET_MODE (XEXP (op, 0)));
1205 break;
1207 case ABS:
1208 /* (abs (neg <foo>)) -> (abs <foo>) */
1209 if (GET_CODE (op) == NEG)
1210 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1213 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1214 do nothing. */
1215 if (GET_MODE (op) == VOIDmode)
1216 break;
1218 /* If operand is something known to be positive, ignore the ABS. */
1219 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1220 || val_signbit_known_clear_p (GET_MODE (op),
1221 nonzero_bits (op, GET_MODE (op))))
1222 return op;
1224 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1225 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1226 return gen_rtx_NEG (mode, op);
1228 break;
1230 case FFS:
1231 /* (ffs (*_extend <X>)) = (ffs <X>) */
1232 if (GET_CODE (op) == SIGN_EXTEND
1233 || GET_CODE (op) == ZERO_EXTEND)
1234 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1236 break;
1238 case POPCOUNT:
1239 switch (GET_CODE (op))
1241 case BSWAP:
1242 case ZERO_EXTEND:
1243 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1244 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1247 case ROTATE:
1248 case ROTATERT:
1249 /* Rotations don't affect popcount. */
1250 if (!side_effects_p (XEXP (op, 1)))
1251 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1252 GET_MODE (XEXP (op, 0)));
1253 break;
1255 default:
1256 break;
1258 break;
1260 case PARITY:
1261 switch (GET_CODE (op))
1263 case NOT:
1264 case BSWAP:
1265 case ZERO_EXTEND:
1266 case SIGN_EXTEND:
1267 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 case ROTATE:
1271 case ROTATERT:
1272 /* Rotations don't affect parity. */
1273 if (!side_effects_p (XEXP (op, 1)))
1274 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1275 GET_MODE (XEXP (op, 0)));
1276 break;
1278 default:
1279 break;
1281 break;
1283 case BSWAP:
1284 /* (bswap (bswap x)) -> x. */
1285 if (GET_CODE (op) == BSWAP)
1286 return XEXP (op, 0);
1287 break;
1289 case FLOAT:
1290 /* (float (sign_extend <X>)) = (float <X>). */
1291 if (GET_CODE (op) == SIGN_EXTEND)
1292 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1294 break;
1296 case SIGN_EXTEND:
1297 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1298 becomes just the MINUS if its mode is MODE. This allows
1299 folding switch statements on machines using casesi (such as
1300 the VAX). */
1301 if (GET_CODE (op) == TRUNCATE
1302 && GET_MODE (XEXP (op, 0)) == mode
1303 && GET_CODE (XEXP (op, 0)) == MINUS
1304 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1305 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1306 return XEXP (op, 0);
1308 /* Extending a widening multiplication should be canonicalized to
1309 a wider widening multiplication. */
1310 if (GET_CODE (op) == MULT)
1312 rtx lhs = XEXP (op, 0);
1313 rtx rhs = XEXP (op, 1);
1314 enum rtx_code lcode = GET_CODE (lhs);
1315 enum rtx_code rcode = GET_CODE (rhs);
1317 /* Widening multiplies usually extend both operands, but sometimes
1318 they use a shift to extract a portion of a register. */
1319 if ((lcode == SIGN_EXTEND
1320 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1321 && (rcode == SIGN_EXTEND
1322 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1324 enum machine_mode lmode = GET_MODE (lhs);
1325 enum machine_mode rmode = GET_MODE (rhs);
1326 int bits;
1328 if (lcode == ASHIFTRT)
1329 /* Number of bits not shifted off the end. */
1330 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1331 else /* lcode == SIGN_EXTEND */
1332 /* Size of inner mode. */
1333 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1335 if (rcode == ASHIFTRT)
1336 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1337 else /* rcode == SIGN_EXTEND */
1338 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1340 /* We can only widen multiplies if the result is mathematically
1341 equivalent. I.e. if overflow was impossible. */
1342 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1343 return simplify_gen_binary
1344 (MULT, mode,
1345 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1346 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1350 /* Check for a sign extension of a subreg of a promoted
1351 variable, where the promotion is sign-extended, and the
1352 target mode is the same as the variable's promotion. */
1353 if (GET_CODE (op) == SUBREG
1354 && SUBREG_PROMOTED_VAR_P (op)
1355 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1356 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1358 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1359 if (temp)
1360 return temp;
1363 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1364 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1365 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1367 gcc_assert (GET_MODE_BITSIZE (mode)
1368 > GET_MODE_BITSIZE (GET_MODE (op)));
1369 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1370 GET_MODE (XEXP (op, 0)));
1373 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1374 is (sign_extend:M (subreg:O <X>)) if there is mode with
1375 GET_MODE_BITSIZE (N) - I bits.
1376 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1377 is similarly (zero_extend:M (subreg:O <X>)). */
1378 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1379 && GET_CODE (XEXP (op, 0)) == ASHIFT
1380 && CONST_INT_P (XEXP (op, 1))
1381 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1382 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1384 enum machine_mode tmode
1385 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1386 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1387 gcc_assert (GET_MODE_BITSIZE (mode)
1388 > GET_MODE_BITSIZE (GET_MODE (op)));
1389 if (tmode != BLKmode)
1391 rtx inner =
1392 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1393 if (inner)
1394 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1395 ? SIGN_EXTEND : ZERO_EXTEND,
1396 mode, inner, tmode);
1400 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1401 /* As we do not know which address space the pointer is referring to,
1402 we can do this only if the target does not support different pointer
1403 or address modes depending on the address space. */
1404 if (target_default_pointer_address_modes_p ()
1405 && ! POINTERS_EXTEND_UNSIGNED
1406 && mode == Pmode && GET_MODE (op) == ptr_mode
1407 && (CONSTANT_P (op)
1408 || (GET_CODE (op) == SUBREG
1409 && REG_P (SUBREG_REG (op))
1410 && REG_POINTER (SUBREG_REG (op))
1411 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1412 return convert_memory_address (Pmode, op);
1413 #endif
1414 break;
1416 case ZERO_EXTEND:
1417 /* Check for a zero extension of a subreg of a promoted
1418 variable, where the promotion is zero-extended, and the
1419 target mode is the same as the variable's promotion. */
1420 if (GET_CODE (op) == SUBREG
1421 && SUBREG_PROMOTED_VAR_P (op)
1422 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1423 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1425 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1426 if (temp)
1427 return temp;
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == ZERO_EXTEND
1442 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == ZERO_EXTEND
1444 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1446 enum machine_mode lmode = GET_MODE (lhs);
1447 enum machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1450 if (lcode == LSHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1453 else /* lcode == ZERO_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457 if (rcode == LSHIFTRT)
1458 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1459 else /* rcode == ZERO_EXTEND */
1460 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1462 /* We can only widen multiplies if the result is mathematically
1463 equivalent. I.e. if overflow was impossible. */
1464 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1465 return simplify_gen_binary
1466 (MULT, mode,
1467 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1468 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1472 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1473 if (GET_CODE (op) == ZERO_EXTEND)
1474 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1475 GET_MODE (XEXP (op, 0)));
1477 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1478 is (zero_extend:M (subreg:O <X>)) if there is mode with
1479 GET_MODE_BITSIZE (N) - I bits. */
1480 if (GET_CODE (op) == LSHIFTRT
1481 && GET_CODE (XEXP (op, 0)) == ASHIFT
1482 && CONST_INT_P (XEXP (op, 1))
1483 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1484 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1486 enum machine_mode tmode
1487 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1488 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1489 if (tmode != BLKmode)
1491 rtx inner =
1492 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1493 if (inner)
1494 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1498 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1499 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1500 of mode N. E.g.
1501 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1502 (and:SI (reg:SI) (const_int 63)). */
1503 if (GET_CODE (op) == SUBREG
1504 && GET_MODE_PRECISION (GET_MODE (op))
1505 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1506 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1507 <= HOST_BITS_PER_WIDE_INT
1508 && GET_MODE_PRECISION (mode)
1509 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1510 && subreg_lowpart_p (op)
1511 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1512 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1514 if (GET_MODE_PRECISION (mode)
1515 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1516 return SUBREG_REG (op);
1517 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1518 GET_MODE (SUBREG_REG (op)));
1521 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1522 /* As we do not know which address space the pointer is referring to,
1523 we can do this only if the target does not support different pointer
1524 or address modes depending on the address space. */
1525 if (target_default_pointer_address_modes_p ()
1526 && POINTERS_EXTEND_UNSIGNED > 0
1527 && mode == Pmode && GET_MODE (op) == ptr_mode
1528 && (CONSTANT_P (op)
1529 || (GET_CODE (op) == SUBREG
1530 && REG_P (SUBREG_REG (op))
1531 && REG_POINTER (SUBREG_REG (op))
1532 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1533 return convert_memory_address (Pmode, op);
1534 #endif
1535 break;
1537 default:
1538 break;
1541 return 0;
1544 /* Try to compute the value of a unary operation CODE whose output mode is to
1545 be MODE with input operand OP whose mode was originally OP_MODE.
1546 Return zero if the value cannot be computed. */
1548 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1549 rtx op, enum machine_mode op_mode)
1551 unsigned int width = GET_MODE_PRECISION (mode);
1553 if (code == VEC_DUPLICATE)
1555 gcc_assert (VECTOR_MODE_P (mode));
1556 if (GET_MODE (op) != VOIDmode)
1558 if (!VECTOR_MODE_P (GET_MODE (op)))
1559 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1560 else
1561 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1562 (GET_MODE (op)));
1564 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1565 || GET_CODE (op) == CONST_VECTOR)
1567 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1568 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1569 rtvec v = rtvec_alloc (n_elts);
1570 unsigned int i;
1572 if (GET_CODE (op) != CONST_VECTOR)
1573 for (i = 0; i < n_elts; i++)
1574 RTVEC_ELT (v, i) = op;
1575 else
1577 enum machine_mode inmode = GET_MODE (op);
1578 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1579 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1581 gcc_assert (in_n_elts < n_elts);
1582 gcc_assert ((n_elts % in_n_elts) == 0);
1583 for (i = 0; i < n_elts; i++)
1584 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1586 return gen_rtx_CONST_VECTOR (mode, v);
1590 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1592 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1593 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1594 enum machine_mode opmode = GET_MODE (op);
1595 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1596 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1597 rtvec v = rtvec_alloc (n_elts);
1598 unsigned int i;
1600 gcc_assert (op_n_elts == n_elts);
1601 for (i = 0; i < n_elts; i++)
1603 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1604 CONST_VECTOR_ELT (op, i),
1605 GET_MODE_INNER (opmode));
1606 if (!x)
1607 return 0;
1608 RTVEC_ELT (v, i) = x;
1610 return gen_rtx_CONST_VECTOR (mode, v);
1613 /* The order of these tests is critical so that, for example, we don't
1614 check the wrong mode (input vs. output) for a conversion operation,
1615 such as FIX. At some point, this should be simplified. */
1617 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1619 REAL_VALUE_TYPE d;
1621 if (op_mode == VOIDmode)
1623 /* CONST_INT have VOIDmode as the mode. We assume that all
1624 the bits of the constant are significant, though, this is
1625 a dangerous assumption as many times CONST_INTs are
1626 created and used with garbage in the bits outside of the
1627 precision of the implied mode of the const_int. */
1628 op_mode = MAX_MODE_INT;
1631 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1632 d = real_value_truncate (mode, d);
1633 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1635 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1637 REAL_VALUE_TYPE d;
1639 if (op_mode == VOIDmode)
1641 /* CONST_INT have VOIDmode as the mode. We assume that all
1642 the bits of the constant are significant, though, this is
1643 a dangerous assumption as many times CONST_INTs are
1644 created and used with garbage in the bits outside of the
1645 precision of the implied mode of the const_int. */
1646 op_mode = MAX_MODE_INT;
1649 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1650 d = real_value_truncate (mode, d);
1651 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1654 if (CONST_SCALAR_INT_P (op) && width > 0)
1656 wide_int result;
1657 enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1658 rtx_mode_t op0 = std::make_pair (op, imode);
1659 int int_value;
1661 #if TARGET_SUPPORTS_WIDE_INT == 0
1662 /* This assert keeps the simplification from producing a result
1663 that cannot be represented in a CONST_DOUBLE but a lot of
1664 upstream callers expect that this function never fails to
1665 simplify something and so you if you added this to the test
1666 above the code would die later anyway. If this assert
1667 happens, you just need to make the port support wide int. */
1668 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1669 #endif
1671 switch (code)
1673 case NOT:
1674 result = wi::bit_not (op0);
1675 break;
1677 case NEG:
1678 result = wi::neg (op0);
1679 break;
1681 case ABS:
1682 result = wi::abs (op0);
1683 break;
1685 case FFS:
1686 result = wi::shwi (wi::ffs (op0), mode);
1687 break;
1689 case CLZ:
1690 if (wi::ne_p (op0, 0))
1691 int_value = wi::clz (op0);
1692 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1693 int_value = GET_MODE_PRECISION (mode);
1694 result = wi::shwi (int_value, mode);
1695 break;
1697 case CLRSB:
1698 result = wi::shwi (wi::clrsb (op0), mode);
1699 break;
1701 case CTZ:
1702 if (wi::ne_p (op0, 0))
1703 int_value = wi::ctz (op0);
1704 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1705 int_value = GET_MODE_PRECISION (mode);
1706 result = wi::shwi (int_value, mode);
1707 break;
1709 case POPCOUNT:
1710 result = wi::shwi (wi::popcount (op0), mode);
1711 break;
1713 case PARITY:
1714 result = wi::shwi (wi::parity (op0), mode);
1715 break;
1717 case BSWAP:
1718 result = wide_int (op0).bswap ();
1719 break;
1721 case TRUNCATE:
1722 case ZERO_EXTEND:
1723 result = wide_int::from (op0, width, UNSIGNED);
1724 break;
1726 case SIGN_EXTEND:
1727 result = wide_int::from (op0, width, SIGNED);
1728 break;
1730 case SQRT:
1731 default:
1732 return 0;
1735 return immed_wide_int_const (result, mode);
1738 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1739 && SCALAR_FLOAT_MODE_P (mode)
1740 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1742 REAL_VALUE_TYPE d;
1743 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1745 switch (code)
1747 case SQRT:
1748 return 0;
1749 case ABS:
1750 d = real_value_abs (&d);
1751 break;
1752 case NEG:
1753 d = real_value_negate (&d);
1754 break;
1755 case FLOAT_TRUNCATE:
1756 d = real_value_truncate (mode, d);
1757 break;
1758 case FLOAT_EXTEND:
1759 /* All this does is change the mode, unless changing
1760 mode class. */
1761 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1762 real_convert (&d, mode, &d);
1763 break;
1764 case FIX:
1765 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1766 break;
1767 case NOT:
1769 long tmp[4];
1770 int i;
1772 real_to_target (tmp, &d, GET_MODE (op));
1773 for (i = 0; i < 4; i++)
1774 tmp[i] = ~tmp[i];
1775 real_from_target (&d, tmp, mode);
1776 break;
1778 default:
1779 gcc_unreachable ();
1781 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1783 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1784 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1785 && GET_MODE_CLASS (mode) == MODE_INT
1786 && width > 0)
1788 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1789 operators are intentionally left unspecified (to ease implementation
1790 by target backends), for consistency, this routine implements the
1791 same semantics for constant folding as used by the middle-end. */
1793 /* This was formerly used only for non-IEEE float.
1794 eggert@twinsun.com says it is safe for IEEE also. */
1795 REAL_VALUE_TYPE x, t;
1796 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1797 wide_int wmax, wmin;
1798 /* This is part of the abi to real_to_integer, but we check
1799 things before making this call. */
1800 bool fail;
1802 switch (code)
1804 case FIX:
1805 if (REAL_VALUE_ISNAN (x))
1806 return const0_rtx;
1808 /* Test against the signed upper bound. */
1809 wmax = wi::max_value (width, SIGNED);
1810 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1811 if (REAL_VALUES_LESS (t, x))
1812 return immed_wide_int_const (wmax, mode);
1814 /* Test against the signed lower bound. */
1815 wmin = wi::min_value (width, SIGNED);
1816 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1817 if (REAL_VALUES_LESS (x, t))
1818 return immed_wide_int_const (wmin, mode);
1820 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1821 break;
1823 case UNSIGNED_FIX:
1824 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1825 return const0_rtx;
1827 /* Test against the unsigned upper bound. */
1828 wmax = wi::max_value (width, UNSIGNED);
1829 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1830 if (REAL_VALUES_LESS (t, x))
1831 return immed_wide_int_const (wmax, mode);
1833 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1834 mode);
1835 break;
1837 default:
1838 gcc_unreachable ();
1842 return NULL_RTX;
1845 /* Subroutine of simplify_binary_operation to simplify a binary operation
1846 CODE that can commute with byte swapping, with result mode MODE and
1847 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1848 Return zero if no simplification or canonicalization is possible. */
1850 static rtx
1851 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
1852 rtx op0, rtx op1)
1854 rtx tem;
1856 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
1857 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1859 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1860 simplify_gen_unary (BSWAP, mode, op1, mode));
1861 return simplify_gen_unary (BSWAP, mode, tem, mode);
1864 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1865 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1868 return simplify_gen_unary (BSWAP, mode, tem, mode);
1871 return NULL_RTX;
1874 /* Subroutine of simplify_binary_operation to simplify a commutative,
1875 associative binary operation CODE with result mode MODE, operating
1876 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1877 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1878 canonicalization is possible. */
1880 static rtx
1881 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1882 rtx op0, rtx op1)
1884 rtx tem;
1886 /* Linearize the operator to the left. */
1887 if (GET_CODE (op1) == code)
1889 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1890 if (GET_CODE (op0) == code)
1892 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1893 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1896 /* "a op (b op c)" becomes "(b op c) op a". */
1897 if (! swap_commutative_operands_p (op1, op0))
1898 return simplify_gen_binary (code, mode, op1, op0);
1900 tem = op0;
1901 op0 = op1;
1902 op1 = tem;
1905 if (GET_CODE (op0) == code)
1907 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1908 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1910 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1911 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1914 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1915 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1916 if (tem != 0)
1917 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1919 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1920 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1921 if (tem != 0)
1922 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1925 return 0;
1929 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1930 and OP1. Return 0 if no simplification is possible.
1932 Don't use this for relational operations such as EQ or LT.
1933 Use simplify_relational_operation instead. */
1935 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1936 rtx op0, rtx op1)
1938 rtx trueop0, trueop1;
1939 rtx tem;
1941 /* Relational operations don't work here. We must know the mode
1942 of the operands in order to do the comparison correctly.
1943 Assuming a full word can give incorrect results.
1944 Consider comparing 128 with -128 in QImode. */
1945 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1946 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1948 /* Make sure the constant is second. */
1949 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1950 && swap_commutative_operands_p (op0, op1))
1952 tem = op0, op0 = op1, op1 = tem;
1955 trueop0 = avoid_constant_pool_reference (op0);
1956 trueop1 = avoid_constant_pool_reference (op1);
1958 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1959 if (tem)
1960 return tem;
1961 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1964 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1965 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1966 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1967 actual constants. */
1969 static rtx
1970 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1971 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1973 rtx tem, reversed, opleft, opright;
1974 HOST_WIDE_INT val;
1975 unsigned int width = GET_MODE_PRECISION (mode);
1977 /* Even if we can't compute a constant result,
1978 there are some cases worth simplifying. */
1980 switch (code)
1982 case PLUS:
1983 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1984 when x is NaN, infinite, or finite and nonzero. They aren't
1985 when x is -0 and the rounding mode is not towards -infinity,
1986 since (-0) + 0 is then 0. */
1987 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1988 return op0;
1990 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1991 transformations are safe even for IEEE. */
1992 if (GET_CODE (op0) == NEG)
1993 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1994 else if (GET_CODE (op1) == NEG)
1995 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1997 /* (~a) + 1 -> -a */
1998 if (INTEGRAL_MODE_P (mode)
1999 && GET_CODE (op0) == NOT
2000 && trueop1 == const1_rtx)
2001 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2003 /* Handle both-operands-constant cases. We can only add
2004 CONST_INTs to constants since the sum of relocatable symbols
2005 can't be handled by most assemblers. Don't add CONST_INT
2006 to CONST_INT since overflow won't be computed properly if wider
2007 than HOST_BITS_PER_WIDE_INT. */
2009 if ((GET_CODE (op0) == CONST
2010 || GET_CODE (op0) == SYMBOL_REF
2011 || GET_CODE (op0) == LABEL_REF)
2012 && CONST_INT_P (op1))
2013 return plus_constant (mode, op0, INTVAL (op1));
2014 else if ((GET_CODE (op1) == CONST
2015 || GET_CODE (op1) == SYMBOL_REF
2016 || GET_CODE (op1) == LABEL_REF)
2017 && CONST_INT_P (op0))
2018 return plus_constant (mode, op1, INTVAL (op0));
2020 /* See if this is something like X * C - X or vice versa or
2021 if the multiplication is written as a shift. If so, we can
2022 distribute and make a new multiply, shift, or maybe just
2023 have X (if C is 2 in the example above). But don't make
2024 something more expensive than we had before. */
2026 if (SCALAR_INT_MODE_P (mode))
2028 rtx lhs = op0, rhs = op1;
2030 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2031 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2033 if (GET_CODE (lhs) == NEG)
2035 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2036 lhs = XEXP (lhs, 0);
2038 else if (GET_CODE (lhs) == MULT
2039 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2041 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2042 lhs = XEXP (lhs, 0);
2044 else if (GET_CODE (lhs) == ASHIFT
2045 && CONST_INT_P (XEXP (lhs, 1))
2046 && INTVAL (XEXP (lhs, 1)) >= 0
2047 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2049 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2050 GET_MODE_PRECISION (mode));
2051 lhs = XEXP (lhs, 0);
2054 if (GET_CODE (rhs) == NEG)
2056 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2057 rhs = XEXP (rhs, 0);
2059 else if (GET_CODE (rhs) == MULT
2060 && CONST_INT_P (XEXP (rhs, 1)))
2062 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2063 rhs = XEXP (rhs, 0);
2065 else if (GET_CODE (rhs) == ASHIFT
2066 && CONST_INT_P (XEXP (rhs, 1))
2067 && INTVAL (XEXP (rhs, 1)) >= 0
2068 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2070 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2071 GET_MODE_PRECISION (mode));
2072 rhs = XEXP (rhs, 0);
2075 if (rtx_equal_p (lhs, rhs))
2077 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2078 rtx coeff;
2079 bool speed = optimize_function_for_speed_p (cfun);
2081 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2083 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2084 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2085 ? tem : 0;
2089 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2090 if (CONST_SCALAR_INT_P (op1)
2091 && GET_CODE (op0) == XOR
2092 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2093 && mode_signbit_p (mode, op1))
2094 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2095 simplify_gen_binary (XOR, mode, op1,
2096 XEXP (op0, 1)));
2098 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2099 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2100 && GET_CODE (op0) == MULT
2101 && GET_CODE (XEXP (op0, 0)) == NEG)
2103 rtx in1, in2;
2105 in1 = XEXP (XEXP (op0, 0), 0);
2106 in2 = XEXP (op0, 1);
2107 return simplify_gen_binary (MINUS, mode, op1,
2108 simplify_gen_binary (MULT, mode,
2109 in1, in2));
2112 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2113 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2114 is 1. */
2115 if (COMPARISON_P (op0)
2116 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2117 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2118 && (reversed = reversed_comparison (op0, mode)))
2119 return
2120 simplify_gen_unary (NEG, mode, reversed, mode);
2122 /* If one of the operands is a PLUS or a MINUS, see if we can
2123 simplify this by the associative law.
2124 Don't use the associative law for floating point.
2125 The inaccuracy makes it nonassociative,
2126 and subtle programs can break if operations are associated. */
2128 if (INTEGRAL_MODE_P (mode)
2129 && (plus_minus_operand_p (op0)
2130 || plus_minus_operand_p (op1))
2131 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2132 return tem;
2134 /* Reassociate floating point addition only when the user
2135 specifies associative math operations. */
2136 if (FLOAT_MODE_P (mode)
2137 && flag_associative_math)
2139 tem = simplify_associative_operation (code, mode, op0, op1);
2140 if (tem)
2141 return tem;
2143 break;
2145 case COMPARE:
2146 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2147 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2148 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2149 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2151 rtx xop00 = XEXP (op0, 0);
2152 rtx xop10 = XEXP (op1, 0);
2154 #ifdef HAVE_cc0
2155 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2156 #else
2157 if (REG_P (xop00) && REG_P (xop10)
2158 && GET_MODE (xop00) == GET_MODE (xop10)
2159 && REGNO (xop00) == REGNO (xop10)
2160 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2161 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2162 #endif
2163 return xop00;
2165 break;
2167 case MINUS:
2168 /* We can't assume x-x is 0 even with non-IEEE floating point,
2169 but since it is zero except in very strange circumstances, we
2170 will treat it as zero with -ffinite-math-only. */
2171 if (rtx_equal_p (trueop0, trueop1)
2172 && ! side_effects_p (op0)
2173 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2174 return CONST0_RTX (mode);
2176 /* Change subtraction from zero into negation. (0 - x) is the
2177 same as -x when x is NaN, infinite, or finite and nonzero.
2178 But if the mode has signed zeros, and does not round towards
2179 -infinity, then 0 - 0 is 0, not -0. */
2180 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2181 return simplify_gen_unary (NEG, mode, op1, mode);
2183 /* (-1 - a) is ~a. */
2184 if (trueop0 == constm1_rtx)
2185 return simplify_gen_unary (NOT, mode, op1, mode);
2187 /* Subtracting 0 has no effect unless the mode has signed zeros
2188 and supports rounding towards -infinity. In such a case,
2189 0 - 0 is -0. */
2190 if (!(HONOR_SIGNED_ZEROS (mode)
2191 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2192 && trueop1 == CONST0_RTX (mode))
2193 return op0;
2195 /* See if this is something like X * C - X or vice versa or
2196 if the multiplication is written as a shift. If so, we can
2197 distribute and make a new multiply, shift, or maybe just
2198 have X (if C is 2 in the example above). But don't make
2199 something more expensive than we had before. */
2201 if (SCALAR_INT_MODE_P (mode))
2203 rtx lhs = op0, rhs = op1;
2205 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2206 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2208 if (GET_CODE (lhs) == NEG)
2210 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2211 lhs = XEXP (lhs, 0);
2213 else if (GET_CODE (lhs) == MULT
2214 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2216 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2217 lhs = XEXP (lhs, 0);
2219 else if (GET_CODE (lhs) == ASHIFT
2220 && CONST_INT_P (XEXP (lhs, 1))
2221 && INTVAL (XEXP (lhs, 1)) >= 0
2222 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2224 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2225 GET_MODE_PRECISION (mode));
2226 lhs = XEXP (lhs, 0);
2229 if (GET_CODE (rhs) == NEG)
2231 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2232 rhs = XEXP (rhs, 0);
2234 else if (GET_CODE (rhs) == MULT
2235 && CONST_INT_P (XEXP (rhs, 1)))
2237 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2238 rhs = XEXP (rhs, 0);
2240 else if (GET_CODE (rhs) == ASHIFT
2241 && CONST_INT_P (XEXP (rhs, 1))
2242 && INTVAL (XEXP (rhs, 1)) >= 0
2243 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2245 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2246 GET_MODE_PRECISION (mode));
2247 negcoeff1 = -negcoeff1;
2248 rhs = XEXP (rhs, 0);
2251 if (rtx_equal_p (lhs, rhs))
2253 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2254 rtx coeff;
2255 bool speed = optimize_function_for_speed_p (cfun);
2257 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2259 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2260 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2261 ? tem : 0;
2265 /* (a - (-b)) -> (a + b). True even for IEEE. */
2266 if (GET_CODE (op1) == NEG)
2267 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2269 /* (-x - c) may be simplified as (-c - x). */
2270 if (GET_CODE (op0) == NEG
2271 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2273 tem = simplify_unary_operation (NEG, mode, op1, mode);
2274 if (tem)
2275 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2278 /* Don't let a relocatable value get a negative coeff. */
2279 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2280 return simplify_gen_binary (PLUS, mode,
2281 op0,
2282 neg_const_int (mode, op1));
2284 /* (x - (x & y)) -> (x & ~y) */
2285 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2287 if (rtx_equal_p (op0, XEXP (op1, 0)))
2289 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2290 GET_MODE (XEXP (op1, 1)));
2291 return simplify_gen_binary (AND, mode, op0, tem);
2293 if (rtx_equal_p (op0, XEXP (op1, 1)))
2295 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2296 GET_MODE (XEXP (op1, 0)));
2297 return simplify_gen_binary (AND, mode, op0, tem);
2301 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2302 by reversing the comparison code if valid. */
2303 if (STORE_FLAG_VALUE == 1
2304 && trueop0 == const1_rtx
2305 && COMPARISON_P (op1)
2306 && (reversed = reversed_comparison (op1, mode)))
2307 return reversed;
2309 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2310 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2311 && GET_CODE (op1) == MULT
2312 && GET_CODE (XEXP (op1, 0)) == NEG)
2314 rtx in1, in2;
2316 in1 = XEXP (XEXP (op1, 0), 0);
2317 in2 = XEXP (op1, 1);
2318 return simplify_gen_binary (PLUS, mode,
2319 simplify_gen_binary (MULT, mode,
2320 in1, in2),
2321 op0);
2324 /* Canonicalize (minus (neg A) (mult B C)) to
2325 (minus (mult (neg B) C) A). */
2326 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2327 && GET_CODE (op1) == MULT
2328 && GET_CODE (op0) == NEG)
2330 rtx in1, in2;
2332 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2333 in2 = XEXP (op1, 1);
2334 return simplify_gen_binary (MINUS, mode,
2335 simplify_gen_binary (MULT, mode,
2336 in1, in2),
2337 XEXP (op0, 0));
2340 /* If one of the operands is a PLUS or a MINUS, see if we can
2341 simplify this by the associative law. This will, for example,
2342 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2343 Don't use the associative law for floating point.
2344 The inaccuracy makes it nonassociative,
2345 and subtle programs can break if operations are associated. */
2347 if (INTEGRAL_MODE_P (mode)
2348 && (plus_minus_operand_p (op0)
2349 || plus_minus_operand_p (op1))
2350 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2351 return tem;
2352 break;
2354 case MULT:
2355 if (trueop1 == constm1_rtx)
2356 return simplify_gen_unary (NEG, mode, op0, mode);
2358 if (GET_CODE (op0) == NEG)
2360 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2361 /* If op1 is a MULT as well and simplify_unary_operation
2362 just moved the NEG to the second operand, simplify_gen_binary
2363 below could through simplify_associative_operation move
2364 the NEG around again and recurse endlessly. */
2365 if (temp
2366 && GET_CODE (op1) == MULT
2367 && GET_CODE (temp) == MULT
2368 && XEXP (op1, 0) == XEXP (temp, 0)
2369 && GET_CODE (XEXP (temp, 1)) == NEG
2370 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2371 temp = NULL_RTX;
2372 if (temp)
2373 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2375 if (GET_CODE (op1) == NEG)
2377 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2378 /* If op0 is a MULT as well and simplify_unary_operation
2379 just moved the NEG to the second operand, simplify_gen_binary
2380 below could through simplify_associative_operation move
2381 the NEG around again and recurse endlessly. */
2382 if (temp
2383 && GET_CODE (op0) == MULT
2384 && GET_CODE (temp) == MULT
2385 && XEXP (op0, 0) == XEXP (temp, 0)
2386 && GET_CODE (XEXP (temp, 1)) == NEG
2387 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2388 temp = NULL_RTX;
2389 if (temp)
2390 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2393 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2394 x is NaN, since x * 0 is then also NaN. Nor is it valid
2395 when the mode has signed zeros, since multiplying a negative
2396 number by 0 will give -0, not 0. */
2397 if (!HONOR_NANS (mode)
2398 && !HONOR_SIGNED_ZEROS (mode)
2399 && trueop1 == CONST0_RTX (mode)
2400 && ! side_effects_p (op0))
2401 return op1;
2403 /* In IEEE floating point, x*1 is not equivalent to x for
2404 signalling NaNs. */
2405 if (!HONOR_SNANS (mode)
2406 && trueop1 == CONST1_RTX (mode))
2407 return op0;
2409 /* Convert multiply by constant power of two into shift. */
2410 if (CONST_SCALAR_INT_P (trueop1))
2412 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2413 if (val >= 0)
2414 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2417 /* x*2 is x+x and x*(-1) is -x */
2418 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2419 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2420 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2421 && GET_MODE (op0) == mode)
2423 REAL_VALUE_TYPE d;
2424 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2426 if (REAL_VALUES_EQUAL (d, dconst2))
2427 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2429 if (!HONOR_SNANS (mode)
2430 && REAL_VALUES_EQUAL (d, dconstm1))
2431 return simplify_gen_unary (NEG, mode, op0, mode);
2434 /* Optimize -x * -x as x * x. */
2435 if (FLOAT_MODE_P (mode)
2436 && GET_CODE (op0) == NEG
2437 && GET_CODE (op1) == NEG
2438 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2439 && !side_effects_p (XEXP (op0, 0)))
2440 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2442 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2443 if (SCALAR_FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == ABS
2445 && GET_CODE (op1) == ABS
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2450 /* Reassociate multiplication, but for floating point MULTs
2451 only when the user specifies unsafe math optimizations. */
2452 if (! FLOAT_MODE_P (mode)
2453 || flag_unsafe_math_optimizations)
2455 tem = simplify_associative_operation (code, mode, op0, op1);
2456 if (tem)
2457 return tem;
2459 break;
2461 case IOR:
2462 if (trueop1 == CONST0_RTX (mode))
2463 return op0;
2464 if (INTEGRAL_MODE_P (mode)
2465 && trueop1 == CONSTM1_RTX (mode)
2466 && !side_effects_p (op0))
2467 return op1;
2468 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2469 return op0;
2470 /* A | (~A) -> -1 */
2471 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2472 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2473 && ! side_effects_p (op0)
2474 && SCALAR_INT_MODE_P (mode))
2475 return constm1_rtx;
2477 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2478 if (CONST_INT_P (op1)
2479 && HWI_COMPUTABLE_MODE_P (mode)
2480 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2481 && !side_effects_p (op0))
2482 return op1;
2484 /* Canonicalize (X & C1) | C2. */
2485 if (GET_CODE (op0) == AND
2486 && CONST_INT_P (trueop1)
2487 && CONST_INT_P (XEXP (op0, 1)))
2489 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2490 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2491 HOST_WIDE_INT c2 = INTVAL (trueop1);
2493 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2494 if ((c1 & c2) == c1
2495 && !side_effects_p (XEXP (op0, 0)))
2496 return trueop1;
2498 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2499 if (((c1|c2) & mask) == mask)
2500 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2502 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2503 if (((c1 & ~c2) & mask) != (c1 & mask))
2505 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2506 gen_int_mode (c1 & ~c2, mode));
2507 return simplify_gen_binary (IOR, mode, tem, op1);
2511 /* Convert (A & B) | A to A. */
2512 if (GET_CODE (op0) == AND
2513 && (rtx_equal_p (XEXP (op0, 0), op1)
2514 || rtx_equal_p (XEXP (op0, 1), op1))
2515 && ! side_effects_p (XEXP (op0, 0))
2516 && ! side_effects_p (XEXP (op0, 1)))
2517 return op1;
2519 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2520 mode size to (rotate A CX). */
2522 if (GET_CODE (op1) == ASHIFT
2523 || GET_CODE (op1) == SUBREG)
2525 opleft = op1;
2526 opright = op0;
2528 else
2530 opright = op1;
2531 opleft = op0;
2534 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2535 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2536 && CONST_INT_P (XEXP (opleft, 1))
2537 && CONST_INT_P (XEXP (opright, 1))
2538 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2539 == GET_MODE_PRECISION (mode)))
2540 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2542 /* Same, but for ashift that has been "simplified" to a wider mode
2543 by simplify_shift_const. */
2545 if (GET_CODE (opleft) == SUBREG
2546 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2547 && GET_CODE (opright) == LSHIFTRT
2548 && GET_CODE (XEXP (opright, 0)) == SUBREG
2549 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2550 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2551 && (GET_MODE_SIZE (GET_MODE (opleft))
2552 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2553 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2554 SUBREG_REG (XEXP (opright, 0)))
2555 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2556 && CONST_INT_P (XEXP (opright, 1))
2557 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2558 == GET_MODE_PRECISION (mode)))
2559 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2560 XEXP (SUBREG_REG (opleft), 1));
2562 /* If we have (ior (and (X C1) C2)), simplify this by making
2563 C1 as small as possible if C1 actually changes. */
2564 if (CONST_INT_P (op1)
2565 && (HWI_COMPUTABLE_MODE_P (mode)
2566 || INTVAL (op1) > 0)
2567 && GET_CODE (op0) == AND
2568 && CONST_INT_P (XEXP (op0, 1))
2569 && CONST_INT_P (op1)
2570 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2572 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2573 gen_int_mode (UINTVAL (XEXP (op0, 1))
2574 & ~UINTVAL (op1),
2575 mode));
2576 return simplify_gen_binary (IOR, mode, tmp, op1);
2579 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2580 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2581 the PLUS does not affect any of the bits in OP1: then we can do
2582 the IOR as a PLUS and we can associate. This is valid if OP1
2583 can be safely shifted left C bits. */
2584 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2585 && GET_CODE (XEXP (op0, 0)) == PLUS
2586 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2587 && CONST_INT_P (XEXP (op0, 1))
2588 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2590 int count = INTVAL (XEXP (op0, 1));
2591 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2593 if (mask >> count == INTVAL (trueop1)
2594 && trunc_int_for_mode (mask, mode) == mask
2595 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2596 return simplify_gen_binary (ASHIFTRT, mode,
2597 plus_constant (mode, XEXP (op0, 0),
2598 mask),
2599 XEXP (op0, 1));
2602 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2603 if (tem)
2604 return tem;
2606 tem = simplify_associative_operation (code, mode, op0, op1);
2607 if (tem)
2608 return tem;
2609 break;
2611 case XOR:
2612 if (trueop1 == CONST0_RTX (mode))
2613 return op0;
2614 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2615 return simplify_gen_unary (NOT, mode, op0, mode);
2616 if (rtx_equal_p (trueop0, trueop1)
2617 && ! side_effects_p (op0)
2618 && GET_MODE_CLASS (mode) != MODE_CC)
2619 return CONST0_RTX (mode);
2621 /* Canonicalize XOR of the most significant bit to PLUS. */
2622 if (CONST_SCALAR_INT_P (op1)
2623 && mode_signbit_p (mode, op1))
2624 return simplify_gen_binary (PLUS, mode, op0, op1);
2625 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2626 if (CONST_SCALAR_INT_P (op1)
2627 && GET_CODE (op0) == PLUS
2628 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2629 && mode_signbit_p (mode, XEXP (op0, 1)))
2630 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2631 simplify_gen_binary (XOR, mode, op1,
2632 XEXP (op0, 1)));
2634 /* If we are XORing two things that have no bits in common,
2635 convert them into an IOR. This helps to detect rotation encoded
2636 using those methods and possibly other simplifications. */
2638 if (HWI_COMPUTABLE_MODE_P (mode)
2639 && (nonzero_bits (op0, mode)
2640 & nonzero_bits (op1, mode)) == 0)
2641 return (simplify_gen_binary (IOR, mode, op0, op1));
2643 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2644 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2645 (NOT y). */
2647 int num_negated = 0;
2649 if (GET_CODE (op0) == NOT)
2650 num_negated++, op0 = XEXP (op0, 0);
2651 if (GET_CODE (op1) == NOT)
2652 num_negated++, op1 = XEXP (op1, 0);
2654 if (num_negated == 2)
2655 return simplify_gen_binary (XOR, mode, op0, op1);
2656 else if (num_negated == 1)
2657 return simplify_gen_unary (NOT, mode,
2658 simplify_gen_binary (XOR, mode, op0, op1),
2659 mode);
2662 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2663 correspond to a machine insn or result in further simplifications
2664 if B is a constant. */
2666 if (GET_CODE (op0) == AND
2667 && rtx_equal_p (XEXP (op0, 1), op1)
2668 && ! side_effects_p (op1))
2669 return simplify_gen_binary (AND, mode,
2670 simplify_gen_unary (NOT, mode,
2671 XEXP (op0, 0), mode),
2672 op1);
2674 else if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 0), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 1), mode),
2680 op1);
2682 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2683 we can transform like this:
2684 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2685 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2686 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2687 Attempt a few simplifications when B and C are both constants. */
2688 if (GET_CODE (op0) == AND
2689 && CONST_INT_P (op1)
2690 && CONST_INT_P (XEXP (op0, 1)))
2692 rtx a = XEXP (op0, 0);
2693 rtx b = XEXP (op0, 1);
2694 rtx c = op1;
2695 HOST_WIDE_INT bval = INTVAL (b);
2696 HOST_WIDE_INT cval = INTVAL (c);
2698 rtx na_c
2699 = simplify_binary_operation (AND, mode,
2700 simplify_gen_unary (NOT, mode, a, mode),
2702 if ((~cval & bval) == 0)
2704 /* Try to simplify ~A&C | ~B&C. */
2705 if (na_c != NULL_RTX)
2706 return simplify_gen_binary (IOR, mode, na_c,
2707 gen_int_mode (~bval & cval, mode));
2709 else
2711 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2712 if (na_c == const0_rtx)
2714 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2715 gen_int_mode (~cval & bval,
2716 mode));
2717 return simplify_gen_binary (IOR, mode, a_nc_b,
2718 gen_int_mode (~bval & cval,
2719 mode));
2724 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2725 comparison if STORE_FLAG_VALUE is 1. */
2726 if (STORE_FLAG_VALUE == 1
2727 && trueop1 == const1_rtx
2728 && COMPARISON_P (op0)
2729 && (reversed = reversed_comparison (op0, mode)))
2730 return reversed;
2732 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2733 is (lt foo (const_int 0)), so we can perform the above
2734 simplification if STORE_FLAG_VALUE is 1. */
2736 if (STORE_FLAG_VALUE == 1
2737 && trueop1 == const1_rtx
2738 && GET_CODE (op0) == LSHIFTRT
2739 && CONST_INT_P (XEXP (op0, 1))
2740 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2741 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2743 /* (xor (comparison foo bar) (const_int sign-bit))
2744 when STORE_FLAG_VALUE is the sign bit. */
2745 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2746 && trueop1 == const_true_rtx
2747 && COMPARISON_P (op0)
2748 && (reversed = reversed_comparison (op0, mode)))
2749 return reversed;
2751 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2752 if (tem)
2753 return tem;
2755 tem = simplify_associative_operation (code, mode, op0, op1);
2756 if (tem)
2757 return tem;
2758 break;
2760 case AND:
2761 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2762 return trueop1;
2763 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2764 return op0;
2765 if (HWI_COMPUTABLE_MODE_P (mode))
2767 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2768 HOST_WIDE_INT nzop1;
2769 if (CONST_INT_P (trueop1))
2771 HOST_WIDE_INT val1 = INTVAL (trueop1);
2772 /* If we are turning off bits already known off in OP0, we need
2773 not do an AND. */
2774 if ((nzop0 & ~val1) == 0)
2775 return op0;
2777 nzop1 = nonzero_bits (trueop1, mode);
2778 /* If we are clearing all the nonzero bits, the result is zero. */
2779 if ((nzop1 & nzop0) == 0
2780 && !side_effects_p (op0) && !side_effects_p (op1))
2781 return CONST0_RTX (mode);
2783 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2784 && GET_MODE_CLASS (mode) != MODE_CC)
2785 return op0;
2786 /* A & (~A) -> 0 */
2787 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2788 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2789 && ! side_effects_p (op0)
2790 && GET_MODE_CLASS (mode) != MODE_CC)
2791 return CONST0_RTX (mode);
2793 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2794 there are no nonzero bits of C outside of X's mode. */
2795 if ((GET_CODE (op0) == SIGN_EXTEND
2796 || GET_CODE (op0) == ZERO_EXTEND)
2797 && CONST_INT_P (trueop1)
2798 && HWI_COMPUTABLE_MODE_P (mode)
2799 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2800 & UINTVAL (trueop1)) == 0)
2802 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2803 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2804 gen_int_mode (INTVAL (trueop1),
2805 imode));
2806 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2809 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2810 we might be able to further simplify the AND with X and potentially
2811 remove the truncation altogether. */
2812 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2814 rtx x = XEXP (op0, 0);
2815 enum machine_mode xmode = GET_MODE (x);
2816 tem = simplify_gen_binary (AND, xmode, x,
2817 gen_int_mode (INTVAL (trueop1), xmode));
2818 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2821 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2822 if (GET_CODE (op0) == IOR
2823 && CONST_INT_P (trueop1)
2824 && CONST_INT_P (XEXP (op0, 1)))
2826 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2827 return simplify_gen_binary (IOR, mode,
2828 simplify_gen_binary (AND, mode,
2829 XEXP (op0, 0), op1),
2830 gen_int_mode (tmp, mode));
2833 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2834 insn (and may simplify more). */
2835 if (GET_CODE (op0) == XOR
2836 && rtx_equal_p (XEXP (op0, 0), op1)
2837 && ! side_effects_p (op1))
2838 return simplify_gen_binary (AND, mode,
2839 simplify_gen_unary (NOT, mode,
2840 XEXP (op0, 1), mode),
2841 op1);
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 1), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 0), mode),
2849 op1);
2851 /* Similarly for (~(A ^ B)) & A. */
2852 if (GET_CODE (op0) == NOT
2853 && GET_CODE (XEXP (op0, 0)) == XOR
2854 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2855 && ! side_effects_p (op1))
2856 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2858 if (GET_CODE (op0) == NOT
2859 && GET_CODE (XEXP (op0, 0)) == XOR
2860 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2861 && ! side_effects_p (op1))
2862 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2864 /* Convert (A | B) & A to A. */
2865 if (GET_CODE (op0) == IOR
2866 && (rtx_equal_p (XEXP (op0, 0), op1)
2867 || rtx_equal_p (XEXP (op0, 1), op1))
2868 && ! side_effects_p (XEXP (op0, 0))
2869 && ! side_effects_p (XEXP (op0, 1)))
2870 return op1;
2872 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2873 ((A & N) + B) & M -> (A + B) & M
2874 Similarly if (N & M) == 0,
2875 ((A | N) + B) & M -> (A + B) & M
2876 and for - instead of + and/or ^ instead of |.
2877 Also, if (N & M) == 0, then
2878 (A +- N) & M -> A & M. */
2879 if (CONST_INT_P (trueop1)
2880 && HWI_COMPUTABLE_MODE_P (mode)
2881 && ~UINTVAL (trueop1)
2882 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2883 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2885 rtx pmop[2];
2886 int which;
2888 pmop[0] = XEXP (op0, 0);
2889 pmop[1] = XEXP (op0, 1);
2891 if (CONST_INT_P (pmop[1])
2892 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2893 return simplify_gen_binary (AND, mode, pmop[0], op1);
2895 for (which = 0; which < 2; which++)
2897 tem = pmop[which];
2898 switch (GET_CODE (tem))
2900 case AND:
2901 if (CONST_INT_P (XEXP (tem, 1))
2902 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2903 == UINTVAL (trueop1))
2904 pmop[which] = XEXP (tem, 0);
2905 break;
2906 case IOR:
2907 case XOR:
2908 if (CONST_INT_P (XEXP (tem, 1))
2909 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2910 pmop[which] = XEXP (tem, 0);
2911 break;
2912 default:
2913 break;
2917 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2919 tem = simplify_gen_binary (GET_CODE (op0), mode,
2920 pmop[0], pmop[1]);
2921 return simplify_gen_binary (code, mode, tem, op1);
2925 /* (and X (ior (not X) Y) -> (and X Y) */
2926 if (GET_CODE (op1) == IOR
2927 && GET_CODE (XEXP (op1, 0)) == NOT
2928 && op0 == XEXP (XEXP (op1, 0), 0))
2929 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2931 /* (and (ior (not X) Y) X) -> (and X Y) */
2932 if (GET_CODE (op0) == IOR
2933 && GET_CODE (XEXP (op0, 0)) == NOT
2934 && op1 == XEXP (XEXP (op0, 0), 0))
2935 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2937 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2941 tem = simplify_associative_operation (code, mode, op0, op1);
2942 if (tem)
2943 return tem;
2944 break;
2946 case UDIV:
2947 /* 0/x is 0 (or x&0 if x has side-effects). */
2948 if (trueop0 == CONST0_RTX (mode))
2950 if (side_effects_p (op1))
2951 return simplify_gen_binary (AND, mode, op1, trueop0);
2952 return trueop0;
2954 /* x/1 is x. */
2955 if (trueop1 == CONST1_RTX (mode))
2957 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2958 if (tem)
2959 return tem;
2961 /* Convert divide by power of two into shift. */
2962 if (CONST_INT_P (trueop1)
2963 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2964 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2965 break;
2967 case DIV:
2968 /* Handle floating point and integers separately. */
2969 if (SCALAR_FLOAT_MODE_P (mode))
2971 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2972 safe for modes with NaNs, since 0.0 / 0.0 will then be
2973 NaN rather than 0.0. Nor is it safe for modes with signed
2974 zeros, since dividing 0 by a negative number gives -0.0 */
2975 if (trueop0 == CONST0_RTX (mode)
2976 && !HONOR_NANS (mode)
2977 && !HONOR_SIGNED_ZEROS (mode)
2978 && ! side_effects_p (op1))
2979 return op0;
2980 /* x/1.0 is x. */
2981 if (trueop1 == CONST1_RTX (mode)
2982 && !HONOR_SNANS (mode))
2983 return op0;
2985 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2986 && trueop1 != CONST0_RTX (mode))
2988 REAL_VALUE_TYPE d;
2989 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2991 /* x/-1.0 is -x. */
2992 if (REAL_VALUES_EQUAL (d, dconstm1)
2993 && !HONOR_SNANS (mode))
2994 return simplify_gen_unary (NEG, mode, op0, mode);
2996 /* Change FP division by a constant into multiplication.
2997 Only do this with -freciprocal-math. */
2998 if (flag_reciprocal_math
2999 && !REAL_VALUES_EQUAL (d, dconst0))
3001 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3002 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3003 return simplify_gen_binary (MULT, mode, op0, tem);
3007 else if (SCALAR_INT_MODE_P (mode))
3009 /* 0/x is 0 (or x&0 if x has side-effects). */
3010 if (trueop0 == CONST0_RTX (mode)
3011 && !cfun->can_throw_non_call_exceptions)
3013 if (side_effects_p (op1))
3014 return simplify_gen_binary (AND, mode, op1, trueop0);
3015 return trueop0;
3017 /* x/1 is x. */
3018 if (trueop1 == CONST1_RTX (mode))
3020 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3021 if (tem)
3022 return tem;
3024 /* x/-1 is -x. */
3025 if (trueop1 == constm1_rtx)
3027 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3028 if (x)
3029 return simplify_gen_unary (NEG, mode, x, mode);
3032 break;
3034 case UMOD:
3035 /* 0%x is 0 (or x&0 if x has side-effects). */
3036 if (trueop0 == CONST0_RTX (mode))
3038 if (side_effects_p (op1))
3039 return simplify_gen_binary (AND, mode, op1, trueop0);
3040 return trueop0;
3042 /* x%1 is 0 (of x&0 if x has side-effects). */
3043 if (trueop1 == CONST1_RTX (mode))
3045 if (side_effects_p (op0))
3046 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3047 return CONST0_RTX (mode);
3049 /* Implement modulus by power of two as AND. */
3050 if (CONST_INT_P (trueop1)
3051 && exact_log2 (UINTVAL (trueop1)) > 0)
3052 return simplify_gen_binary (AND, mode, op0,
3053 gen_int_mode (INTVAL (op1) - 1, mode));
3054 break;
3056 case MOD:
3057 /* 0%x is 0 (or x&0 if x has side-effects). */
3058 if (trueop0 == CONST0_RTX (mode))
3060 if (side_effects_p (op1))
3061 return simplify_gen_binary (AND, mode, op1, trueop0);
3062 return trueop0;
3064 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3065 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3067 if (side_effects_p (op0))
3068 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3069 return CONST0_RTX (mode);
3071 break;
3073 case ROTATERT:
3074 case ROTATE:
3075 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3076 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3077 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3078 amount instead. */
3079 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3080 if (CONST_INT_P (trueop1)
3081 && IN_RANGE (INTVAL (trueop1),
3082 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3083 GET_MODE_BITSIZE (mode) - 1))
3084 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3085 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3086 - INTVAL (trueop1)));
3087 #endif
3088 /* FALLTHRU */
3089 case ASHIFTRT:
3090 if (trueop1 == CONST0_RTX (mode))
3091 return op0;
3092 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3093 return op0;
3094 /* Rotating ~0 always results in ~0. */
3095 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3096 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3097 && ! side_effects_p (op1))
3098 return op0;
3099 canonicalize_shift:
3100 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3102 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3103 if (val != INTVAL (op1))
3104 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3106 break;
3108 case ASHIFT:
3109 case SS_ASHIFT:
3110 case US_ASHIFT:
3111 if (trueop1 == CONST0_RTX (mode))
3112 return op0;
3113 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3114 return op0;
3115 goto canonicalize_shift;
3117 case LSHIFTRT:
3118 if (trueop1 == CONST0_RTX (mode))
3119 return op0;
3120 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3121 return op0;
3122 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3123 if (GET_CODE (op0) == CLZ
3124 && CONST_INT_P (trueop1)
3125 && STORE_FLAG_VALUE == 1
3126 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3128 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3129 unsigned HOST_WIDE_INT zero_val = 0;
3131 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3132 && zero_val == GET_MODE_PRECISION (imode)
3133 && INTVAL (trueop1) == exact_log2 (zero_val))
3134 return simplify_gen_relational (EQ, mode, imode,
3135 XEXP (op0, 0), const0_rtx);
3137 goto canonicalize_shift;
3139 case SMIN:
3140 if (width <= HOST_BITS_PER_WIDE_INT
3141 && mode_signbit_p (mode, trueop1)
3142 && ! side_effects_p (op0))
3143 return op1;
3144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3145 return op0;
3146 tem = simplify_associative_operation (code, mode, op0, op1);
3147 if (tem)
3148 return tem;
3149 break;
3151 case SMAX:
3152 if (width <= HOST_BITS_PER_WIDE_INT
3153 && CONST_INT_P (trueop1)
3154 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3155 && ! side_effects_p (op0))
3156 return op1;
3157 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3158 return op0;
3159 tem = simplify_associative_operation (code, mode, op0, op1);
3160 if (tem)
3161 return tem;
3162 break;
3164 case UMIN:
3165 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3166 return op1;
3167 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3168 return op0;
3169 tem = simplify_associative_operation (code, mode, op0, op1);
3170 if (tem)
3171 return tem;
3172 break;
3174 case UMAX:
3175 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3176 return op1;
3177 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3178 return op0;
3179 tem = simplify_associative_operation (code, mode, op0, op1);
3180 if (tem)
3181 return tem;
3182 break;
3184 case SS_PLUS:
3185 case US_PLUS:
3186 case SS_MINUS:
3187 case US_MINUS:
3188 case SS_MULT:
3189 case US_MULT:
3190 case SS_DIV:
3191 case US_DIV:
3192 /* ??? There are simplifications that can be done. */
3193 return 0;
3195 case VEC_SELECT:
3196 if (!VECTOR_MODE_P (mode))
3198 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3199 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3200 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3201 gcc_assert (XVECLEN (trueop1, 0) == 1);
3202 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3204 if (GET_CODE (trueop0) == CONST_VECTOR)
3205 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3206 (trueop1, 0, 0)));
3208 /* Extract a scalar element from a nested VEC_SELECT expression
3209 (with optional nested VEC_CONCAT expression). Some targets
3210 (i386) extract scalar element from a vector using chain of
3211 nested VEC_SELECT expressions. When input operand is a memory
3212 operand, this operation can be simplified to a simple scalar
3213 load from an offseted memory address. */
3214 if (GET_CODE (trueop0) == VEC_SELECT)
3216 rtx op0 = XEXP (trueop0, 0);
3217 rtx op1 = XEXP (trueop0, 1);
3219 enum machine_mode opmode = GET_MODE (op0);
3220 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3221 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3223 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3224 int elem;
3226 rtvec vec;
3227 rtx tmp_op, tmp;
3229 gcc_assert (GET_CODE (op1) == PARALLEL);
3230 gcc_assert (i < n_elts);
3232 /* Select element, pointed by nested selector. */
3233 elem = INTVAL (XVECEXP (op1, 0, i));
3235 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3236 if (GET_CODE (op0) == VEC_CONCAT)
3238 rtx op00 = XEXP (op0, 0);
3239 rtx op01 = XEXP (op0, 1);
3241 enum machine_mode mode00, mode01;
3242 int n_elts00, n_elts01;
3244 mode00 = GET_MODE (op00);
3245 mode01 = GET_MODE (op01);
3247 /* Find out number of elements of each operand. */
3248 if (VECTOR_MODE_P (mode00))
3250 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3251 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3253 else
3254 n_elts00 = 1;
3256 if (VECTOR_MODE_P (mode01))
3258 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3259 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3261 else
3262 n_elts01 = 1;
3264 gcc_assert (n_elts == n_elts00 + n_elts01);
3266 /* Select correct operand of VEC_CONCAT
3267 and adjust selector. */
3268 if (elem < n_elts01)
3269 tmp_op = op00;
3270 else
3272 tmp_op = op01;
3273 elem -= n_elts00;
3276 else
3277 tmp_op = op0;
3279 vec = rtvec_alloc (1);
3280 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3282 tmp = gen_rtx_fmt_ee (code, mode,
3283 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3284 return tmp;
3286 if (GET_CODE (trueop0) == VEC_DUPLICATE
3287 && GET_MODE (XEXP (trueop0, 0)) == mode)
3288 return XEXP (trueop0, 0);
3290 else
3292 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3293 gcc_assert (GET_MODE_INNER (mode)
3294 == GET_MODE_INNER (GET_MODE (trueop0)));
3295 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3297 if (GET_CODE (trueop0) == CONST_VECTOR)
3299 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3300 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3301 rtvec v = rtvec_alloc (n_elts);
3302 unsigned int i;
3304 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3305 for (i = 0; i < n_elts; i++)
3307 rtx x = XVECEXP (trueop1, 0, i);
3309 gcc_assert (CONST_INT_P (x));
3310 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3311 INTVAL (x));
3314 return gen_rtx_CONST_VECTOR (mode, v);
3317 /* Recognize the identity. */
3318 if (GET_MODE (trueop0) == mode)
3320 bool maybe_ident = true;
3321 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3323 rtx j = XVECEXP (trueop1, 0, i);
3324 if (!CONST_INT_P (j) || INTVAL (j) != i)
3326 maybe_ident = false;
3327 break;
3330 if (maybe_ident)
3331 return trueop0;
3334 /* If we build {a,b} then permute it, build the result directly. */
3335 if (XVECLEN (trueop1, 0) == 2
3336 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3337 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3338 && GET_CODE (trueop0) == VEC_CONCAT
3339 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3340 && GET_MODE (XEXP (trueop0, 0)) == mode
3341 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3342 && GET_MODE (XEXP (trueop0, 1)) == mode)
3344 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3345 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3346 rtx subop0, subop1;
3348 gcc_assert (i0 < 4 && i1 < 4);
3349 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3350 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3352 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3355 if (XVECLEN (trueop1, 0) == 2
3356 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3357 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3358 && GET_CODE (trueop0) == VEC_CONCAT
3359 && GET_MODE (trueop0) == mode)
3361 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3362 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3363 rtx subop0, subop1;
3365 gcc_assert (i0 < 2 && i1 < 2);
3366 subop0 = XEXP (trueop0, i0);
3367 subop1 = XEXP (trueop0, i1);
3369 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3372 /* If we select one half of a vec_concat, return that. */
3373 if (GET_CODE (trueop0) == VEC_CONCAT
3374 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3376 rtx subop0 = XEXP (trueop0, 0);
3377 rtx subop1 = XEXP (trueop0, 1);
3378 enum machine_mode mode0 = GET_MODE (subop0);
3379 enum machine_mode mode1 = GET_MODE (subop1);
3380 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3381 int l0 = GET_MODE_SIZE (mode0) / li;
3382 int l1 = GET_MODE_SIZE (mode1) / li;
3383 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3384 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3386 bool success = true;
3387 for (int i = 1; i < l0; ++i)
3389 rtx j = XVECEXP (trueop1, 0, i);
3390 if (!CONST_INT_P (j) || INTVAL (j) != i)
3392 success = false;
3393 break;
3396 if (success)
3397 return subop0;
3399 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3401 bool success = true;
3402 for (int i = 1; i < l1; ++i)
3404 rtx j = XVECEXP (trueop1, 0, i);
3405 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3407 success = false;
3408 break;
3411 if (success)
3412 return subop1;
3417 if (XVECLEN (trueop1, 0) == 1
3418 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3419 && GET_CODE (trueop0) == VEC_CONCAT)
3421 rtx vec = trueop0;
3422 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3424 /* Try to find the element in the VEC_CONCAT. */
3425 while (GET_MODE (vec) != mode
3426 && GET_CODE (vec) == VEC_CONCAT)
3428 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3429 if (offset < vec_size)
3430 vec = XEXP (vec, 0);
3431 else
3433 offset -= vec_size;
3434 vec = XEXP (vec, 1);
3436 vec = avoid_constant_pool_reference (vec);
3439 if (GET_MODE (vec) == mode)
3440 return vec;
3443 /* If we select elements in a vec_merge that all come from the same
3444 operand, select from that operand directly. */
3445 if (GET_CODE (op0) == VEC_MERGE)
3447 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3448 if (CONST_INT_P (trueop02))
3450 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3451 bool all_operand0 = true;
3452 bool all_operand1 = true;
3453 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3455 rtx j = XVECEXP (trueop1, 0, i);
3456 if (sel & (1 << UINTVAL (j)))
3457 all_operand1 = false;
3458 else
3459 all_operand0 = false;
3461 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3462 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3463 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3464 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3468 /* If we have two nested selects that are inverses of each
3469 other, replace them with the source operand. */
3470 if (GET_CODE (trueop0) == VEC_SELECT
3471 && GET_MODE (XEXP (trueop0, 0)) == mode)
3473 rtx op0_subop1 = XEXP (trueop0, 1);
3474 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3475 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3477 /* Apply the outer ordering vector to the inner one. (The inner
3478 ordering vector is expressly permitted to be of a different
3479 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3480 then the two VEC_SELECTs cancel. */
3481 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3483 rtx x = XVECEXP (trueop1, 0, i);
3484 if (!CONST_INT_P (x))
3485 return 0;
3486 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3487 if (!CONST_INT_P (y) || i != INTVAL (y))
3488 return 0;
3490 return XEXP (trueop0, 0);
3493 return 0;
3494 case VEC_CONCAT:
3496 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3497 ? GET_MODE (trueop0)
3498 : GET_MODE_INNER (mode));
3499 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3500 ? GET_MODE (trueop1)
3501 : GET_MODE_INNER (mode));
3503 gcc_assert (VECTOR_MODE_P (mode));
3504 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3505 == GET_MODE_SIZE (mode));
3507 if (VECTOR_MODE_P (op0_mode))
3508 gcc_assert (GET_MODE_INNER (mode)
3509 == GET_MODE_INNER (op0_mode));
3510 else
3511 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3513 if (VECTOR_MODE_P (op1_mode))
3514 gcc_assert (GET_MODE_INNER (mode)
3515 == GET_MODE_INNER (op1_mode));
3516 else
3517 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3519 if ((GET_CODE (trueop0) == CONST_VECTOR
3520 || CONST_SCALAR_INT_P (trueop0)
3521 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3522 && (GET_CODE (trueop1) == CONST_VECTOR
3523 || CONST_SCALAR_INT_P (trueop1)
3524 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3526 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3527 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3528 rtvec v = rtvec_alloc (n_elts);
3529 unsigned int i;
3530 unsigned in_n_elts = 1;
3532 if (VECTOR_MODE_P (op0_mode))
3533 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3534 for (i = 0; i < n_elts; i++)
3536 if (i < in_n_elts)
3538 if (!VECTOR_MODE_P (op0_mode))
3539 RTVEC_ELT (v, i) = trueop0;
3540 else
3541 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3543 else
3545 if (!VECTOR_MODE_P (op1_mode))
3546 RTVEC_ELT (v, i) = trueop1;
3547 else
3548 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3549 i - in_n_elts);
3553 return gen_rtx_CONST_VECTOR (mode, v);
3556 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3557 Restrict the transformation to avoid generating a VEC_SELECT with a
3558 mode unrelated to its operand. */
3559 if (GET_CODE (trueop0) == VEC_SELECT
3560 && GET_CODE (trueop1) == VEC_SELECT
3561 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3562 && GET_MODE (XEXP (trueop0, 0)) == mode)
3564 rtx par0 = XEXP (trueop0, 1);
3565 rtx par1 = XEXP (trueop1, 1);
3566 int len0 = XVECLEN (par0, 0);
3567 int len1 = XVECLEN (par1, 0);
3568 rtvec vec = rtvec_alloc (len0 + len1);
3569 for (int i = 0; i < len0; i++)
3570 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3571 for (int i = 0; i < len1; i++)
3572 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3573 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3574 gen_rtx_PARALLEL (VOIDmode, vec));
3577 return 0;
3579 default:
3580 gcc_unreachable ();
3583 return 0;
/* Subroutine of simplify_binary_operation: fold CODE applied to the
   constant operands OP0 and OP1 in MODE.  Handles element-wise folding
   of pairs of CONST_VECTORs, VEC_CONCAT of constant scalars/vectors,
   scalar floating point (through the REAL_VALUE_TYPE soft-float
   machinery), and integer arithmetic through the wide-int interface.
   Returns the folded constant rtx, or NULL_RTX when the operation
   cannot or must not be folded (e.g. it would raise an FP exception
   under -ftrapping-math, or a shift count is out of range).  */
3587 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3588 rtx op0, rtx op1)
3590 unsigned int width = GET_MODE_PRECISION (mode);
/* Element-wise fold of two CONST_VECTORs (any code except VEC_CONCAT,
   which changes the mode and is handled below).  */
3592 if (VECTOR_MODE_P (mode)
3593 && code != VEC_CONCAT
3594 && GET_CODE (op0) == CONST_VECTOR
3595 && GET_CODE (op1) == CONST_VECTOR)
3597 unsigned n_elts = GET_MODE_NUNITS (mode);
3598 enum machine_mode op0mode = GET_MODE (op0);
3599 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3600 enum machine_mode op1mode = GET_MODE (op1);
3601 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3602 rtvec v = rtvec_alloc (n_elts);
3603 unsigned int i;
3605 gcc_assert (op0_n_elts == n_elts);
3606 gcc_assert (op1_n_elts == n_elts);
3607 for (i = 0; i < n_elts; i++)
3609 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3610 CONST_VECTOR_ELT (op0, i),
3611 CONST_VECTOR_ELT (op1, i));
/* If any single element fails to fold, give up on the whole vector.  */
3612 if (!x)
3613 return 0;
3614 RTVEC_ELT (v, i) = x;
3617 return gen_rtx_CONST_VECTOR (mode, v);
/* VEC_CONCAT of two constant scalars (or constant vectors) folds
   directly into a CONST_VECTOR of MODE.  */
3620 if (VECTOR_MODE_P (mode)
3621 && code == VEC_CONCAT
3622 && (CONST_SCALAR_INT_P (op0)
3623 || GET_CODE (op0) == CONST_FIXED
3624 || CONST_DOUBLE_AS_FLOAT_P (op0))
3625 && (CONST_SCALAR_INT_P (op1)
3626 || CONST_DOUBLE_AS_FLOAT_P (op1)
3627 || GET_CODE (op1) == CONST_FIXED))
3629 unsigned n_elts = GET_MODE_NUNITS (mode);
3630 rtvec v = rtvec_alloc (n_elts);
3632 gcc_assert (n_elts >= 2);
/* Two elements: both operands are scalars.  */
3633 if (n_elts == 2)
3635 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3636 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3638 RTVEC_ELT (v, 0) = op0;
3639 RTVEC_ELT (v, 1) = op1;
/* More than two: both operands must themselves be vectors whose
   element counts add up to N_ELTS.  */
3641 else
3643 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3644 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3645 unsigned i;
3647 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3648 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3649 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3651 for (i = 0; i < op0_n_elts; ++i)
3652 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3653 for (i = 0; i < op1_n_elts; ++i)
3654 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3657 return gen_rtx_CONST_VECTOR (mode, v);
/* Scalar floating point.  Bitwise codes (AND/IOR/XOR, as generated
   e.g. for abs/neg/copysign idioms) operate on the target bit image;
   arithmetic codes go through real_arithmetic with checks that refuse
   to fold anything that could raise a trap or depend on the runtime
   rounding mode.  */
3660 if (SCALAR_FLOAT_MODE_P (mode)
3661 && CONST_DOUBLE_AS_FLOAT_P (op0)
3662 && CONST_DOUBLE_AS_FLOAT_P (op1)
3663 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3665 if (code == AND
3666 || code == IOR
3667 || code == XOR)
3669 long tmp0[4];
3670 long tmp1[4];
3671 REAL_VALUE_TYPE r;
3672 int i;
/* Convert both values to their target representation, combine the
   words bitwise, then convert back.  */
3674 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3675 GET_MODE (op0));
3676 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3677 GET_MODE (op1));
3678 for (i = 0; i < 4; i++)
3680 switch (code)
3682 case AND:
3683 tmp0[i] &= tmp1[i];
3684 break;
3685 case IOR:
3686 tmp0[i] |= tmp1[i];
3687 break;
3688 case XOR:
3689 tmp0[i] ^= tmp1[i];
3690 break;
3691 default:
3692 gcc_unreachable ();
3695 real_from_target (&r, tmp0, mode);
3696 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3698 else
3700 REAL_VALUE_TYPE f0, f1, value, result;
3701 bool inexact;
3703 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3704 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3705 real_convert (&f0, mode, &f0);
3706 real_convert (&f1, mode, &f1);
/* Operating on a signalling NaN would raise an exception; don't fold.  */
3708 if (HONOR_SNANS (mode)
3709 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3710 return 0;
/* Division by zero traps (or is undefined without infinities).  */
3712 if (code == DIV
3713 && REAL_VALUES_EQUAL (f1, dconst0)
3714 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3715 return 0;
/* Inf-with-Inf combinations that produce NaN also raise an
   invalid-operation exception; leave those to the hardware.  */
3716 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3718 && flag_trapping_math
3719 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3721 int s0 = REAL_VALUE_NEGATIVE (f0);
3722 int s1 = REAL_VALUE_NEGATIVE (f1);
3724 switch (code)
3726 case PLUS:
3727 /* Inf + -Inf = NaN plus exception. */
3728 if (s0 != s1)
3729 return 0;
3730 break;
3731 case MINUS:
3732 /* Inf - Inf = NaN plus exception. */
3733 if (s0 == s1)
3734 return 0;
3735 break;
3736 case DIV:
3737 /* Inf / Inf = NaN plus exception. */
3738 return 0;
3739 default:
3740 break;
3744 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3745 && flag_trapping_math
3746 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3747 || (REAL_VALUE_ISINF (f1)
3748 && REAL_VALUES_EQUAL (f0, dconst0))))
3749 /* Inf * 0 = NaN plus exception. */
3750 return 0;
3752 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3753 &f0, &f1);
3754 real_convert (&result, mode, &value);
3756 /* Don't constant fold this floating point operation if
3757 the result has overflowed and flag_trapping_math. */
3759 if (flag_trapping_math
3760 && MODE_HAS_INFINITIES (mode)
3761 && REAL_VALUE_ISINF (result)
3762 && !REAL_VALUE_ISINF (f0)
3763 && !REAL_VALUE_ISINF (f1))
3764 /* Overflow plus exception. */
3765 return 0;
3767 /* Don't constant fold this floating point operation if the
3768 result may dependent upon the run-time rounding mode and
3769 flag_rounding_math is set, or if GCC's software emulation
3770 is unable to accurately represent the result. */
3772 if ((flag_rounding_math
3773 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3774 && (inexact || !real_identical (&result, &value)))
3775 return NULL_RTX;
3777 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3781 /* We can fold some multi-word operations. */
3782 if (GET_MODE_CLASS (mode) == MODE_INT
3783 && CONST_SCALAR_INT_P (op0)
3784 && CONST_SCALAR_INT_P (op1))
3786 wide_int result;
3787 bool overflow;
3788 rtx_mode_t pop0 = std::make_pair (op0, mode);
3789 rtx_mode_t pop1 = std::make_pair (op1, mode);
3791 #if TARGET_SUPPORTS_WIDE_INT == 0
3792 /* This assert keeps the simplification from producing a result
3793 that cannot be represented in a CONST_DOUBLE but a lot of
3794 upstream callers expect that this function never fails to
3795 simplify something and so you if you added this to the test
3796 above the code would die later anyway. If this assert
3797 happens, you just need to make the port support wide int. */
3798 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3799 #endif
3800 switch (code)
3802 case MINUS:
3803 result = wi::sub (pop0, pop1);
3804 break;
3806 case PLUS:
3807 result = wi::add (pop0, pop1);
3808 break;
3810 case MULT:
3811 result = wi::mul (pop0, pop1);
3812 break;
/* For the division/modulus codes, overflow (e.g. division by zero,
   or INT_MIN / -1) means the value is undefined; refuse to fold.  */
3814 case DIV:
3815 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3816 if (overflow)
3817 return NULL_RTX;
3818 break;
3820 case MOD:
3821 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3822 if (overflow)
3823 return NULL_RTX;
3824 break;
3826 case UDIV:
3827 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3828 if (overflow)
3829 return NULL_RTX;
3830 break;
3832 case UMOD:
3833 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3834 if (overflow)
3835 return NULL_RTX;
3836 break;
3838 case AND:
3839 result = wi::bit_and (pop0, pop1);
3840 break;
3842 case IOR:
3843 result = wi::bit_or (pop0, pop1);
3844 break;
3846 case XOR:
3847 result = wi::bit_xor (pop0, pop1);
3848 break;
3850 case SMIN:
3851 result = wi::smin (pop0, pop1);
3852 break;
3854 case SMAX:
3855 result = wi::smax (pop0, pop1);
3856 break;
3858 case UMIN:
3859 result = wi::umin (pop0, pop1);
3860 break;
3862 case UMAX:
3863 result = wi::umax (pop0, pop1);
3864 break;
/* Shift counts >= WIDTH either truncate (if the target defines
   SHIFT_COUNT_TRUNCATED) or make the fold invalid.  */
3866 case LSHIFTRT:
3867 case ASHIFTRT:
3868 case ASHIFT:
3870 wide_int wop1 = pop1;
3871 if (SHIFT_COUNT_TRUNCATED)
3872 wop1 = wi::umod_trunc (wop1, width);
3873 else if (wi::geu_p (wop1, width))
3874 return NULL_RTX;
3876 switch (code)
3878 case LSHIFTRT:
3879 result = wi::lrshift (pop0, wop1);
3880 break;
3882 case ASHIFTRT:
3883 result = wi::arshift (pop0, wop1);
3884 break;
3886 case ASHIFT:
3887 result = wi::lshift (pop0, wop1);
3888 break;
3890 default:
3891 gcc_unreachable ();
3893 break;
3895 case ROTATE:
3896 case ROTATERT:
/* Negative rotate counts are not meaningful; don't fold.  */
3898 if (wi::neg_p (pop1))
3899 return NULL_RTX;
3901 switch (code)
3903 case ROTATE:
3904 result = wi::lrotate (pop0, pop1);
3905 break;
3907 case ROTATERT:
3908 result = wi::rrotate (pop0, pop1);
3909 break;
3911 default:
3912 gcc_unreachable ();
3914 break;
3916 default:
3917 return NULL_RTX;
3919 return immed_wide_int_const (result, mode);
3922 return NULL_RTX;
3927 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3928 PLUS or MINUS.
3930 Rather than test for specific case, we do this by a brute-force method
3931 and do all possible simplifications until no more changes occur. Then
3932 we rebuild the operation. */
/* One term collected by simplify_plus_minus: an operand together with
   a flag telling whether it is added or subtracted.  */
3934 struct simplify_plus_minus_op_data
/* The term's rtx; set to NULL_RTX once the term has been combined away.  */
3936 rtx op;
/* Nonzero when the term enters the sum negated (i.e. is subtracted).  */
3937 short neg;
3940 static bool
3941 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3943 int result;
3945 result = (commutative_operand_precedence (y)
3946 - commutative_operand_precedence (x));
3947 if (result)
3948 return result > 0;
3950 /* Group together equal REGs to do more simplification. */
3951 if (REG_P (x) && REG_P (y))
3952 return REGNO (x) > REGNO (y);
3953 else
3954 return false;
/* Flatten OP0 CODE OP1 (CODE is PLUS or MINUS) into a table of at most
   8 signed terms, repeatedly combine any pair of terms that simplifies,
   and rebuild a canonical PLUS/MINUS chain.  Returns NULL_RTX when the
   term table would overflow or when no canonicalization happened (so
   the caller keeps the original expression).  */
3957 static rtx
3958 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3959 rtx op1)
3961 struct simplify_plus_minus_op_data ops[8];
3962 rtx result, tem;
3963 int n_ops = 2, input_ops = 2;
3964 int changed, n_constants = 0, canonicalized = 0;
3965 int i, j;
3967 memset (ops, 0, sizeof ops);
3969 /* Set up the two operands and then expand them until nothing has been
3970 changed. If we run out of room in our array, give up; this should
3971 almost never happen. */
3973 ops[0].op = op0;
3974 ops[0].neg = 0;
3975 ops[1].op = op1;
3976 ops[1].neg = (code == MINUS);
/* Expansion: pull apart PLUS, MINUS, NEG, NOT and CONST wrappers until
   the term list reaches a fixed point (this loop's closing test is the
   "while (changed);" below).  */
3980 changed = 0;
3982 for (i = 0; i < n_ops; i++)
3984 rtx this_op = ops[i].op;
3985 int this_neg = ops[i].neg;
3986 enum rtx_code this_code = GET_CODE (this_op);
3988 switch (this_code)
3990 case PLUS:
3991 case MINUS:
/* One slot must stay free for the second sub-operand, so bail at 7.  */
3992 if (n_ops == 7)
3993 return NULL_RTX;
3995 ops[n_ops].op = XEXP (this_op, 1);
3996 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3997 n_ops++;
3999 ops[i].op = XEXP (this_op, 0);
4000 input_ops++;
4001 changed = 1;
4002 canonicalized |= this_neg;
4003 break;
4005 case NEG:
4006 ops[i].op = XEXP (this_op, 0);
4007 ops[i].neg = ! this_neg;
4008 changed = 1;
4009 canonicalized = 1;
4010 break;
4012 case CONST:
/* Unwrap (const (plus C1 C2)) into two constant terms.  */
4013 if (n_ops < 7
4014 && GET_CODE (XEXP (this_op, 0)) == PLUS
4015 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4016 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4018 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4019 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4020 ops[n_ops].neg = this_neg;
4021 n_ops++;
4022 changed = 1;
4023 canonicalized = 1;
4025 break;
4027 case NOT:
4028 /* ~a -> (-a - 1) */
4029 if (n_ops != 7)
4031 ops[n_ops].op = CONSTM1_RTX (mode);
4032 ops[n_ops++].neg = this_neg;
4033 ops[i].op = XEXP (this_op, 0);
4034 ops[i].neg = !this_neg;
4035 changed = 1;
4036 canonicalized = 1;
4038 break;
4040 case CONST_INT:
/* Fold the negation into the constant itself.  */
4041 n_constants++;
4042 if (this_neg)
4044 ops[i].op = neg_const_int (mode, this_op);
4045 ops[i].neg = 0;
4046 changed = 1;
4047 canonicalized = 1;
4049 break;
4051 default:
4052 break;
4056 while (changed);
4058 if (n_constants > 1)
4059 canonicalized = 1;
4061 gcc_assert (n_ops >= 2);
4063 /* If we only have two operands, we can avoid the loops. */
4064 if (n_ops == 2)
4066 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4067 rtx lhs, rhs;
4069 /* Get the two operands. Be careful with the order, especially for
4070 the cases where code == MINUS. */
4071 if (ops[0].neg && ops[1].neg)
4073 lhs = gen_rtx_NEG (mode, ops[0].op);
4074 rhs = ops[1].op;
4076 else if (ops[0].neg)
4078 lhs = ops[1].op;
4079 rhs = ops[0].op;
4081 else
4083 lhs = ops[0].op;
4084 rhs = ops[1].op;
4087 return simplify_const_binary_operation (code, mode, lhs, rhs);
4090 /* Now simplify each pair of operands until nothing changes. */
4093 /* Insertion sort is good enough for an eight-element array. */
4094 for (i = 1; i < n_ops; i++)
4096 struct simplify_plus_minus_op_data save;
4097 j = i - 1;
4098 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4099 continue;
4101 canonicalized = 1;
4102 save = ops[i];
4104 ops[j + 1] = ops[j];
4105 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4106 ops[j + 1] = save;
/* Combination pass: try to simplify each live pair of terms; combined
   terms mark their slot NULL_RTX and the pass repeats while anything
   changed.  */
4109 changed = 0;
4110 for (i = n_ops - 1; i > 0; i--)
4111 for (j = i - 1; j >= 0; j--)
4113 rtx lhs = ops[j].op, rhs = ops[i].op;
4114 int lneg = ops[j].neg, rneg = ops[i].neg;
4116 if (lhs != 0 && rhs != 0)
4118 enum rtx_code ncode = PLUS;
4120 if (lneg != rneg)
4122 ncode = MINUS;
4123 if (lneg)
4124 tem = lhs, lhs = rhs, rhs = tem;
4126 else if (swap_commutative_operands_p (lhs, rhs))
4127 tem = lhs, lhs = rhs, rhs = tem;
/* For CONST-wrapped constants, simplify the bare inner values and
   re-wrap the result in CONST if it is not itself constant.  */
4129 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4130 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4132 rtx tem_lhs, tem_rhs;
4134 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4135 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4136 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4138 if (tem && !CONSTANT_P (tem))
4139 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4141 else
4142 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4144 /* Reject "simplifications" that just wrap the two
4145 arguments in a CONST. Failure to do so can result
4146 in infinite recursion with simplify_binary_operation
4147 when it calls us to simplify CONST operations. */
4148 if (tem
4149 && ! (GET_CODE (tem) == CONST
4150 && GET_CODE (XEXP (tem, 0)) == ncode
4151 && XEXP (XEXP (tem, 0), 0) == lhs
4152 && XEXP (XEXP (tem, 0), 1) == rhs))
4154 lneg &= rneg;
4155 if (GET_CODE (tem) == NEG)
4156 tem = XEXP (tem, 0), lneg = !lneg;
4157 if (CONST_INT_P (tem) && lneg)
4158 tem = neg_const_int (mode, tem), lneg = 0;
4160 ops[i].op = tem;
4161 ops[i].neg = lneg;
4162 ops[j].op = NULL_RTX;
4163 changed = 1;
4164 canonicalized = 1;
4169 /* If nothing changed, fail. */
4170 if (!canonicalized)
4171 return NULL_RTX;
4173 /* Pack all the operands to the lower-numbered entries. */
4174 for (i = 0, j = 0; j < n_ops; j++)
4175 if (ops[j].op)
4177 ops[i] = ops[j];
4178 i++;
4180 n_ops = i;
4182 while (changed);
4184 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4185 if (n_ops == 2
4186 && CONST_INT_P (ops[1].op)
4187 && CONSTANT_P (ops[0].op)
4188 && ops[0].neg)
4189 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4191 /* We suppressed creation of trivial CONST expressions in the
4192 combination loop to avoid recursion. Create one manually now.
4193 The combination loop should have ensured that there is exactly
4194 one CONST_INT, and the sort will have ensured that it is last
4195 in the array and that any other constant will be next-to-last. */
4197 if (n_ops > 1
4198 && CONST_INT_P (ops[n_ops - 1].op)
4199 && CONSTANT_P (ops[n_ops - 2].op))
4201 rtx value = ops[n_ops - 1].op;
4202 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4203 value = neg_const_int (mode, value);
4204 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4205 INTVAL (value));
4206 n_ops--;
4209 /* Put a non-negated operand first, if possible. */
4211 for (i = 0; i < n_ops && ops[i].neg; i++)
4212 continue;
4213 if (i == n_ops)
4214 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4215 else if (i != 0)
4217 tem = ops[0].op;
4218 ops[0] = ops[i];
4219 ops[i].op = tem;
4220 ops[i].neg = 1;
4223 /* Now make the result by performing the requested operations. */
4224 result = ops[0].op;
4225 for (i = 1; i < n_ops; i++)
4226 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4227 mode, result, ops[i].op);
4229 return result;
4232 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4233 static bool
4234 plus_minus_operand_p (const_rtx x)
4236 return GET_CODE (x) == PLUS
4237 || GET_CODE (x) == MINUS
4238 || (GET_CODE (x) == CONST
4239 && GET_CODE (XEXP (x, 0)) == PLUS
4240 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4241 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4244 /* Like simplify_binary_operation except used for relational operators.
4245 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4246 not also be VOIDmode.
4248 CMP_MODE specifies in which mode the comparison is done in, so it is
4249 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4250 the operands or, if both are VOIDmode, the operands are compared in
4251 "infinite precision". */
4253 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4254 enum machine_mode cmp_mode, rtx op0, rtx op1)
4256 rtx tem, trueop0, trueop1;
/* Infer CMP_MODE from the operands when the caller passed VOIDmode.  */
4258 if (cmp_mode == VOIDmode)
4259 cmp_mode = GET_MODE (op0);
4260 if (cmp_mode == VOIDmode)
4261 cmp_mode = GET_MODE (op1);
/* First try to fold the comparison to a constant, then translate that
   scalar truth value into the result MODE.  */
4263 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4264 if (tem)
4266 if (SCALAR_FLOAT_MODE_P (mode))
4268 if (tem == const0_rtx)
4269 return CONST0_RTX (mode)
/* A "true" FP flag value is target-defined; without the macro we
   cannot materialize it, so give up.  */;
4270 #ifdef FLOAT_STORE_FLAG_VALUE
4272 REAL_VALUE_TYPE val;
4273 val = FLOAT_STORE_FLAG_VALUE (mode);
4274 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4276 #else
4277 return NULL_RTX;
4278 #endif
4280 if (VECTOR_MODE_P (mode))
4282 if (tem == const0_rtx)
4283 return CONST0_RTX (mode);
4284 #ifdef VECTOR_STORE_FLAG_VALUE
4286 int i, units;
4287 rtvec v;
/* Broadcast the target's per-element "true" value across the vector.  */
4289 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4290 if (val == NULL_RTX)
4291 return NULL_RTX;
4292 if (val == const1_rtx)
4293 return CONST1_RTX (mode);
4295 units = GET_MODE_NUNITS (mode);
4296 v = rtvec_alloc (units);
4297 for (i = 0; i < units; i++)
4298 RTVEC_ELT (v, i) = val;
4299 return gen_rtx_raw_CONST_VECTOR (mode, v);
4301 #else
4302 return NULL_RTX;
4303 #endif
4306 return tem;
4309 /* For the following tests, ensure const0_rtx is op1. */
4310 if (swap_commutative_operands_p (op0, op1)
4311 || (op0 == const0_rtx && op1 != const0_rtx))
4312 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4314 /* If op0 is a compare, extract the comparison arguments from it. */
4315 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4316 return simplify_gen_relational (code, mode, VOIDmode,
4317 XEXP (op0, 0), XEXP (op0, 1));
/* Condition-code comparisons carry target-specific semantics; the
   generic machinery below cannot reason about them.  */
4319 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4320 || CC0_P (op0))
4321 return NULL_RTX;
/* Look through constant-pool references before delegating to the
   non-CC helper.  */
4323 trueop0 = avoid_constant_pool_reference (op0);
4324 trueop1 = avoid_constant_pool_reference (op1);
4325 return simplify_relational_operation_1 (code, mode, cmp_mode,
4326 trueop0, trueop1);
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.

   Returns a simplified or canonicalized rtx, or NULL_RTX if nothing
   applies.  Each transformation below preserves the comparison's
   value; several exist purely to canonicalize equivalent forms.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
	{
	  /* (ne (cmp a b) 0) has the same truth value as (cmp a b)
	     itself, so re-emit the inner comparison in MODE.  */
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  /* (eq (cmp a b) 0) is the reversed inner comparison, when a
	     reversal is known to be safe.  */
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  /* POPCOUNT is zero iff its operand is zero, so comparisons of a
     popcount against zero reduce to comparisons of the operand.  */
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Bit flags encoding everything known about how two comparison operands
   relate; combined with bitwise OR and interpreted by comparison_result
   below.  */
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
4551 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4552 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4553 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4554 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4555 For floating-point comparisons, assume that the operands were ordered. */
4557 static rtx
4558 comparison_result (enum rtx_code code, int known_results)
4560 switch (code)
4562 case EQ:
4563 case UNEQ:
4564 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4565 case NE:
4566 case LTGT:
4567 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4569 case LT:
4570 case UNLT:
4571 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4572 case GE:
4573 case UNGE:
4574 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4576 case GT:
4577 case UNGT:
4578 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4579 case LE:
4580 case UNLE:
4581 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4583 case LTU:
4584 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4585 case GEU:
4586 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4588 case GTU:
4589 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4590 case LEU:
4591 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4593 case ORDERED:
4594 return const_true_rtx;
4595 case UNORDERED:
4596 return const0_rtx;
4597 default:
4598 gcc_unreachable ();
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOID_mode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      /* Recover a usable mode from the extracted operands; give up
	 if both are VOIDmode.  */
      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* Look through constant-pool references so constants stored in
     memory can still be folded.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  /* Without NaNs, ORDERED is always true and UNORDERED always false.  */
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  /* Record both the signed and unsigned orderings so any
	     comparison code can be answered.  */
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      /* Unsigned comparison codes use the unsigned mode bounds.  */
      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      /* Known sign-bit copies narrow the signed range further.  */
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      /* IOR with a nonzero constant makes the value nonzero;
		 if the constant's sign bit is set the value is also
		 known negative.  */
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      /* For integer modes this relies on signed overflow being
		 undefined (abs (INT_MIN) would be negative otherwise),
		 hence the strict-overflow warning.  */
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	       return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      /* Only build a new FMA if something actually changed, to avoid
	 returning an rtx equal to the input.  */
      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);	/* field width in bits */
	  HOST_WIDE_INT op2val = INTVAL (op2);	/* field position */
	  /* Shift the field down to bit 0; the position counts from the
	     most-significant end when BITS_BIG_ENDIAN.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* (if_then_else C STORE_FLAG_VALUE 0) is just C itself;
		 the swapped arms need the reversed comparison.  */
	      if (t == STORE_FLAG_VALUE && f == 0)
	        code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  /* MASK covers the selector bits that correspond to real
	     elements; avoid the undefined full-width shift.  */
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  /* A selector of all zeros (ones) takes everything from
	     op1 (op0).  */
	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      /* Fold a merge of two constant vectors element-wise.  */
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  /* Likewise drop an inner merge on the op1 side when none of
	     its contribution survives the outer selector.  */
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	}

      /* Merging a vector with itself yields that vector regardless of
	 the selector.  */
      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
5200 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5201 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5202 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5204 Works by unpacking OP into a collection of 8-bit values
5205 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5206 and then repacking them again for OUTERMODE. */
5208 static rtx
5209 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5210 enum machine_mode innermode, unsigned int byte)
5212 enum {
5213 value_bit = 8,
5214 value_mask = (1 << value_bit) - 1
5216 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5217 int value_start;
5218 int i;
5219 int elem;
5221 int num_elem;
5222 rtx * elems;
5223 int elem_bitsize;
5224 rtx result_s;
5225 rtvec result_v = NULL;
5226 enum mode_class outer_class;
5227 enum machine_mode outer_submode;
5228 int max_bitsize;
5230 /* Some ports misuse CCmode. */
5231 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5232 return op;
5234 /* We have no way to represent a complex constant at the rtl level. */
5235 if (COMPLEX_MODE_P (outermode))
5236 return NULL_RTX;
5238 /* We support any size mode. */
5239 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5240 GET_MODE_BITSIZE (innermode));
5242 /* Unpack the value. */
5244 if (GET_CODE (op) == CONST_VECTOR)
5246 num_elem = CONST_VECTOR_NUNITS (op);
5247 elems = &CONST_VECTOR_ELT (op, 0);
5248 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5250 else
5252 num_elem = 1;
5253 elems = &op;
5254 elem_bitsize = max_bitsize;
5256 /* If this asserts, it is too complicated; reducing value_bit may help. */
5257 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5258 /* I don't know how to handle endianness of sub-units. */
5259 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5261 for (elem = 0; elem < num_elem; elem++)
5263 unsigned char * vp;
5264 rtx el = elems[elem];
5266 /* Vectors are kept in target memory order. (This is probably
5267 a mistake.) */
5269 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5270 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5271 / BITS_PER_UNIT);
5272 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5273 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5274 unsigned bytele = (subword_byte % UNITS_PER_WORD
5275 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5276 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5279 switch (GET_CODE (el))
5281 case CONST_INT:
5282 for (i = 0;
5283 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5284 i += value_bit)
5285 *vp++ = INTVAL (el) >> i;
5286 /* CONST_INTs are always logically sign-extended. */
5287 for (; i < elem_bitsize; i += value_bit)
5288 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5289 break;
5291 case CONST_WIDE_INT:
5293 rtx_mode_t val = std::make_pair (el, innermode);
5294 unsigned char extend = wi::sign_mask (val);
5296 for (i = 0; i < elem_bitsize; i += value_bit)
5297 *vp++ = wi::extract_uhwi (val, i, value_bit);
5298 for (; i < elem_bitsize; i += value_bit)
5299 *vp++ = extend;
5301 break;
5303 case CONST_DOUBLE:
5304 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5306 unsigned char extend = 0;
5307 /* If this triggers, someone should have generated a
5308 CONST_INT instead. */
5309 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5311 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5312 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5313 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5315 *vp++
5316 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5317 i += value_bit;
5320 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5321 extend = -1;
5322 for (; i < elem_bitsize; i += value_bit)
5323 *vp++ = extend;
5325 else
5327 /* This is big enough for anything on the platform. */
5328 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5329 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5331 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5332 gcc_assert (bitsize <= elem_bitsize);
5333 gcc_assert (bitsize % value_bit == 0);
5335 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5336 GET_MODE (el));
5338 /* real_to_target produces its result in words affected by
5339 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5340 and use WORDS_BIG_ENDIAN instead; see the documentation
5341 of SUBREG in rtl.texi. */
5342 for (i = 0; i < bitsize; i += value_bit)
5344 int ibase;
5345 if (WORDS_BIG_ENDIAN)
5346 ibase = bitsize - 1 - i;
5347 else
5348 ibase = i;
5349 *vp++ = tmp[ibase / 32] >> i % 32;
5352 /* It shouldn't matter what's done here, so fill it with
5353 zero. */
5354 for (; i < elem_bitsize; i += value_bit)
5355 *vp++ = 0;
5357 break;
5359 case CONST_FIXED:
5360 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5362 for (i = 0; i < elem_bitsize; i += value_bit)
5363 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5365 else
5367 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5368 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5369 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5370 i += value_bit)
5371 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5372 >> (i - HOST_BITS_PER_WIDE_INT);
5373 for (; i < elem_bitsize; i += value_bit)
5374 *vp++ = 0;
5376 break;
5378 default:
5379 gcc_unreachable ();
5383 /* Now, pick the right byte to start with. */
5384 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5385 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5386 will already have offset 0. */
5387 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5389 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5390 - byte);
5391 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5392 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5393 byte = (subword_byte % UNITS_PER_WORD
5394 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5397 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5398 so if it's become negative it will instead be very large.) */
5399 gcc_assert (byte < GET_MODE_SIZE (innermode));
5401 /* Convert from bytes to chunks of size value_bit. */
5402 value_start = byte * (BITS_PER_UNIT / value_bit);
5404 /* Re-pack the value. */
5406 if (VECTOR_MODE_P (outermode))
5408 num_elem = GET_MODE_NUNITS (outermode);
5409 result_v = rtvec_alloc (num_elem);
5410 elems = &RTVEC_ELT (result_v, 0);
5411 outer_submode = GET_MODE_INNER (outermode);
5413 else
5415 num_elem = 1;
5416 elems = &result_s;
5417 outer_submode = outermode;
5420 outer_class = GET_MODE_CLASS (outer_submode);
5421 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5423 gcc_assert (elem_bitsize % value_bit == 0);
5424 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5426 for (elem = 0; elem < num_elem; elem++)
5428 unsigned char *vp;
5430 /* Vectors are stored in target memory order. (This is probably
5431 a mistake.) */
5433 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5434 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5435 / BITS_PER_UNIT);
5436 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5437 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5438 unsigned bytele = (subword_byte % UNITS_PER_WORD
5439 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5440 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5443 switch (outer_class)
5445 case MODE_INT:
5446 case MODE_PARTIAL_INT:
5448 int u;
5449 int base = 0;
5450 int units
5451 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5452 / HOST_BITS_PER_WIDE_INT;
5453 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5454 wide_int r;
5456 for (u = 0; u < units; u++)
5458 unsigned HOST_WIDE_INT buf = 0;
5459 for (i = 0;
5460 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5461 i += value_bit)
5462 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5464 tmp[u] = buf;
5465 base += HOST_BITS_PER_WIDE_INT;
5467 gcc_assert (GET_MODE_PRECISION (outer_submode)
5468 <= MAX_BITSIZE_MODE_ANY_INT);
5469 r = wide_int::from_array (tmp, units,
5470 GET_MODE_PRECISION (outer_submode));
5471 elems[elem] = immed_wide_int_const (r, outer_submode);
5473 break;
5475 case MODE_FLOAT:
5476 case MODE_DECIMAL_FLOAT:
5478 REAL_VALUE_TYPE r;
5479 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5481 /* real_from_target wants its input in words affected by
5482 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5483 and use WORDS_BIG_ENDIAN instead; see the documentation
5484 of SUBREG in rtl.texi. */
5485 for (i = 0; i < max_bitsize / 32; i++)
5486 tmp[i] = 0;
5487 for (i = 0; i < elem_bitsize; i += value_bit)
5489 int ibase;
5490 if (WORDS_BIG_ENDIAN)
5491 ibase = elem_bitsize - 1 - i;
5492 else
5493 ibase = i;
5494 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5497 real_from_target (&r, tmp, outer_submode);
5498 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5500 break;
5502 case MODE_FRACT:
5503 case MODE_UFRACT:
5504 case MODE_ACCUM:
5505 case MODE_UACCUM:
5507 FIXED_VALUE_TYPE f;
5508 f.data.low = 0;
5509 f.data.high = 0;
5510 f.mode = outer_submode;
5512 for (i = 0;
5513 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5514 i += value_bit)
5515 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5516 for (; i < elem_bitsize; i += value_bit)
5517 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5518 << (i - HOST_BITS_PER_WIDE_INT));
5520 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5522 break;
5524 default:
5525 gcc_unreachable ();
5528 if (VECTOR_MODE_P (outermode))
5529 return gen_rtx_CONST_VECTOR (outermode, result_v);
5530 else
5531 return result_s;
5534 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5535 Return 0 if no simplifications are possible. */
5537 simplify_subreg (enum machine_mode outermode, rtx op,
5538 enum machine_mode innermode, unsigned int byte)
5540 /* Little bit of sanity checking. */
5541 gcc_assert (innermode != VOIDmode);
5542 gcc_assert (outermode != VOIDmode);
5543 gcc_assert (innermode != BLKmode);
5544 gcc_assert (outermode != BLKmode);
5546 gcc_assert (GET_MODE (op) == innermode
5547 || GET_MODE (op) == VOIDmode);
/* The byte offset must be OUTERMODE-aligned and must fall inside OP;
   otherwise the subreg is malformed and cannot be simplified.  */
5549 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5550 return NULL_RTX;
5552 if (byte >= GET_MODE_SIZE (innermode))
5553 return NULL_RTX;
/* A subreg that changes nothing is just OP itself.  */
5555 if (outermode == innermode && !byte)
5556 return op;
/* Constant operands are handled by bit-level reconstruction in
   simplify_immed_subreg.  */
5558 if (CONST_SCALAR_INT_P (op)
5559 || CONST_DOUBLE_AS_FLOAT_P (op)
5560 || GET_CODE (op) == CONST_FIXED
5561 || GET_CODE (op) == CONST_VECTOR)
5562 return simplify_immed_subreg (outermode, op, innermode, byte);
5564 /* Changing mode twice with SUBREG => just change it once,
5565 or not at all if changing back op starting mode. */
5566 if (GET_CODE (op) == SUBREG)
5568 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5569 int final_offset = byte + SUBREG_BYTE (op);
5570 rtx newx;
5572 if (outermode == innermostmode
5573 && byte == 0 && SUBREG_BYTE (op) == 0)
5574 return SUBREG_REG (op);
5576 /* The SUBREG_BYTE represents offset, as if the value were stored
5577 in memory. Irritating exception is paradoxical subreg, where
5578 we define SUBREG_BYTE to be 0. On big endian machines, this
5579 value should be negative. For a moment, undo this exception. */
5580 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5582 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5583 if (WORDS_BIG_ENDIAN)
5584 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5585 if (BYTES_BIG_ENDIAN)
5586 final_offset += difference % UNITS_PER_WORD;
/* Likewise undo the paradoxical-subreg exception for the inner
   (nested) subreg of OP.  */
5588 if (SUBREG_BYTE (op) == 0
5589 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5591 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5592 if (WORDS_BIG_ENDIAN)
5593 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5594 if (BYTES_BIG_ENDIAN)
5595 final_offset += difference % UNITS_PER_WORD;
5598 /* See whether resulting subreg will be paradoxical. */
5599 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5601 /* In nonparadoxical subregs we can't handle negative offsets. */
5602 if (final_offset < 0)
5603 return NULL_RTX;
5604 /* Bail out in case resulting subreg would be incorrect. */
5605 if (final_offset % GET_MODE_SIZE (outermode)
5606 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5607 return NULL_RTX;
5609 else
5611 int offset = 0;
5612 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5614 /* In paradoxical subreg, see if we are still looking on lower part.
5615 If so, our SUBREG_BYTE will be 0. */
5616 if (WORDS_BIG_ENDIAN)
5617 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5618 if (BYTES_BIG_ENDIAN)
5619 offset += difference % UNITS_PER_WORD;
5620 if (offset == final_offset)
5621 final_offset = 0;
5622 else
5623 return NULL_RTX;
5626 /* Recurse for further possible simplifications. */
5627 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5628 final_offset);
5629 if (newx)
5630 return newx;
/* No further folding; build the collapsed single-level SUBREG if it
   is valid, carrying over the promoted-variable markings when the new
   subreg still exposes the extended lowpart of the innermost value.  */
5631 if (validate_subreg (outermode, innermostmode,
5632 SUBREG_REG (op), final_offset))
5634 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5635 if (SUBREG_PROMOTED_VAR_P (op)
5636 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5637 && GET_MODE_CLASS (outermode) == MODE_INT
5638 && IN_RANGE (GET_MODE_SIZE (outermode),
5639 GET_MODE_SIZE (innermode),
5640 GET_MODE_SIZE (innermostmode))
5641 && subreg_lowpart_p (newx))
5643 SUBREG_PROMOTED_VAR_P (newx) = 1;
5644 SUBREG_PROMOTED_UNSIGNED_SET
5645 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5647 return newx;
5649 return NULL_RTX;
5652 /* SUBREG of a hard register => just change the register number
5653 and/or mode. If the hard register is not valid in that mode,
5654 suppress this simplification. If the hard register is the stack,
5655 frame, or argument pointer, leave this as a SUBREG. */
5657 if (REG_P (op) && HARD_REGISTER_P (op))
5659 unsigned int regno, final_regno;
5661 regno = REGNO (op);
5662 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5663 if (HARD_REGISTER_NUM_P (final_regno))
5665 rtx x;
5666 int final_offset = byte;
5668 /* Adjust offset for paradoxical subregs. */
5669 if (byte == 0
5670 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5672 int difference = (GET_MODE_SIZE (innermode)
5673 - GET_MODE_SIZE (outermode));
5674 if (WORDS_BIG_ENDIAN)
5675 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5676 if (BYTES_BIG_ENDIAN)
5677 final_offset += difference % UNITS_PER_WORD;
5680 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5682 /* Propagate original regno. We don't have any way to specify
5683 the offset inside original regno, so do so only for lowpart.
5684 The information is used only by alias analysis that can not
5685 grog partial register anyway. */
5687 if (subreg_lowpart_offset (outermode, innermode) == byte)
5688 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5689 return x;
5693 /* If we have a SUBREG of a register that we are replacing and we are
5694 replacing it with a MEM, make a new MEM and try replacing the
5695 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5696 or if we would be widening it. */
5698 if (MEM_P (op)
5699 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5700 /* Allow splitting of volatile memory references in case we don't
5701 have instruction to move the whole thing. */
5702 && (! MEM_VOLATILE_P (op)
5703 || ! have_insn_for (SET, innermode))
5704 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5705 return adjust_address_nv (op, outermode, byte);
5707 /* Handle complex values represented as CONCAT
5708 of real and imaginary part. */
5709 if (GET_CODE (op) == CONCAT)
5711 unsigned int part_size, final_offset;
5712 rtx part, res;
/* Pick the half of the CONCAT the byte offset falls into, and
   rebase the offset relative to that half.  */
5714 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5715 if (byte < part_size)
5717 part = XEXP (op, 0);
5718 final_offset = byte;
5720 else
5722 part = XEXP (op, 1);
5723 final_offset = byte - part_size;
/* Give up if the requested piece straddles both halves.  */
5726 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5727 return NULL_RTX;
5729 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5730 if (res)
5731 return res;
5732 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5733 return gen_rtx_SUBREG (outermode, part, final_offset);
5734 return NULL_RTX;
5737 /* A SUBREG resulting from a zero extension may fold to zero if
5738 it extracts higher bits that the ZERO_EXTEND's source bits. */
5739 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5741 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5742 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5743 return CONST0_RTX (outermode);
/* A lowpart SUBREG that narrows a scalar integer is a truncation;
   try truncation-specific simplifications.  */
5746 if (SCALAR_INT_MODE_P (outermode)
5747 && SCALAR_INT_MODE_P (innermode)
5748 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5749 && byte == subreg_lowpart_offset (outermode, innermode))
5751 rtx tem = simplify_truncation (outermode, op, innermode);
5752 if (tem)
5753 return tem;
5756 return NULL_RTX;
5759 /* Make a SUBREG operation or equivalent if it folds. */
5762 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5763 enum machine_mode innermode, unsigned int byte)
5765 rtx newx;
5767 newx = simplify_subreg (outermode, op, innermode, byte);
5768 if (newx)
5769 return newx;
5771 if (GET_CODE (op) == SUBREG
5772 || GET_CODE (op) == CONCAT
5773 || GET_MODE (op) == VOIDmode)
5774 return NULL_RTX;
5776 if (validate_subreg (outermode, innermode, op, byte))
5777 return gen_rtx_SUBREG (outermode, op, byte);
5779 return NULL_RTX;
5782 /* Simplify X, an rtx expression.
5784 Return the simplified expression or NULL if no simplifications
5785 were possible.
5787 This is the preferred entry point into the simplification routines;
5788 however, we still allow passes to call the more specific routines.
5790 Right now GCC has three (yes, three) major bodies of RTL simplification
5791 code that need to be unified.
5793 1. fold_rtx in cse.c. This code uses various CSE specific
5794 information to aid in RTL simplification.
5796 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5797 it uses combine specific information to aid in RTL
5798 simplification.
5800 3. The routines in this file.
5803 Long term we want to only have one body of simplification code; to
5804 get to that state I recommend the following steps:
5806 1. Pour over fold_rtx & simplify_rtx and move any simplifications
5807 which are not pass dependent state into these routines.
5809 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5810 use this routine whenever possible.
5812 3. Allow for pass dependent state to be provided to these
5813 routines and add simplifications based on the pass dependent
5814 state. Remove code from cse.c & combine.c that becomes
5815 redundant/dead.
5817 It will take time, but ultimately the compiler will be easier to
5818 maintain and improve. It's totally silly that when we add a
5819 simplification that it needs to be added to 4 places (3 for RTL
5820 simplification and 1 for tree simplification. */
5823 simplify_rtx (const_rtx x)
5825 const enum rtx_code code = GET_CODE (x);
5826 const enum machine_mode mode = GET_MODE (x);
5828 switch (GET_RTX_CLASS (code))
5830 case RTX_UNARY:
5831 return simplify_unary_operation (code, mode,
5832 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5833 case RTX_COMM_ARITH:
5834 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5835 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5837 /* Fall through.... */
5839 case RTX_BIN_ARITH:
5840 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5842 case RTX_TERNARY:
5843 case RTX_BITFIELD_OPS:
5844 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5845 XEXP (x, 0), XEXP (x, 1),
5846 XEXP (x, 2));
5848 case RTX_COMPARE:
5849 case RTX_COMM_COMPARE:
5850 return simplify_relational_operation (code, mode,
5851 ((GET_MODE (XEXP (x, 0))
5852 != VOIDmode)
5853 ? GET_MODE (XEXP (x, 0))
5854 : GET_MODE (XEXP (x, 1))),
5855 XEXP (x, 0),
5856 XEXP (x, 1));
5858 case RTX_EXTRA:
5859 if (code == SUBREG)
5860 return simplify_subreg (mode, SUBREG_REG (x),
5861 GET_MODE (SUBREG_REG (x)),
5862 SUBREG_BYTE (x));
5863 break;
5865 case RTX_OBJ:
5866 if (code == LO_SUM)
5868 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5869 if (GET_CODE (XEXP (x, 0)) == HIGH
5870 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5871 return XEXP (x, 1);
5873 break;
5875 default:
5876 break;
5878 return NULL;