1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
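/* For example, assuming a 64-bit HOST_WIDE_INT: a low part of
   0x8000000000000000 reads as negative when cast to HOST_WIDE_INT, so
   HWI_SIGN_EXTEND yields (HOST_WIDE_INT) -1 for the high part, while a
   low part of 1 yields a high part of 0. */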
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
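/* For example, negating the maximally negative SImode constant
   (const_int -2147483648) would overflow as a signed operation; doing it
   on the unsigned HOST_WIDE_INT and letting gen_int_mode truncate back
   to SImode simply returns (const_int -2147483648) again. */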
78 /* Test whether expression, X, is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
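/* For example, with a 32-bit SImode this returns true only for the
   constant whose sole set bit within the mode is bit 31, i.e. the value
   0x80000000 after masking with the mode mask. */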
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 unsigned int width;
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 unsigned int width;
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 unsigned int width;
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
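/* For example, with a 32-bit SImode only bit 31 is examined:
   val_signbit_known_set_p (SImode, 0x80000001) is true, and
   val_signbit_known_clear_p (SImode, 0x7fffffff) is true. */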
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
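/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, x), with X
   a pseudo register, folds nothing, so the commutative canonicalization
   applies and the result is (plus:SI x (const_int 1)) with the constant
   second. */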
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
228 REAL_VALUE_TYPE d;
230 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
231 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
233 return x;
235 default:
236 return x;
239 if (GET_MODE (x) == BLKmode)
240 return x;
242 addr = XEXP (x, 0);
244 /* Call target hook to avoid the effects of -fpic etc.... */
245 addr = targetm.delegitimize_address (addr);
247 /* Split the address into a base and integer offset. */
248 if (GET_CODE (addr) == CONST
249 && GET_CODE (XEXP (addr, 0)) == PLUS
250 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
252 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
253 addr = XEXP (XEXP (addr, 0), 0);
256 if (GET_CODE (addr) == LO_SUM)
257 addr = XEXP (addr, 1);
259 /* If this is a constant pool reference, we can turn it into its
260 constant and hope that simplifications happen. */
261 if (GET_CODE (addr) == SYMBOL_REF
262 && CONSTANT_POOL_ADDRESS_P (addr))
264 c = get_pool_constant (addr);
265 cmode = get_pool_mode (addr);
267 /* If we're accessing the constant in a different mode than it was
268 originally stored, attempt to fix that up via subreg simplifications.
269 If that fails we have no choice but to return the original memory. */
270 if ((offset != 0 || cmode != GET_MODE (x))
271 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
273 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
274 if (tem && CONSTANT_P (tem))
275 return tem;
277 else
278 return c;
281 return x;
284 /* Simplify a MEM based on its attributes. This is the default
285 delegitimize_address target hook, and it's recommended that every
286 overrider call it. */
289 delegitimize_mem_from_attrs (rtx x)
291 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
292 use their base addresses as equivalent. */
293 if (MEM_P (x)
294 && MEM_EXPR (x)
295 && MEM_OFFSET_KNOWN_P (x))
297 tree decl = MEM_EXPR (x);
298 machine_mode mode = GET_MODE (x);
299 HOST_WIDE_INT offset = 0;
301 switch (TREE_CODE (decl))
303 default:
304 decl = NULL;
305 break;
307 case VAR_DECL:
308 break;
310 case ARRAY_REF:
311 case ARRAY_RANGE_REF:
312 case COMPONENT_REF:
313 case BIT_FIELD_REF:
314 case REALPART_EXPR:
315 case IMAGPART_EXPR:
316 case VIEW_CONVERT_EXPR:
318 HOST_WIDE_INT bitsize, bitpos;
319 tree toffset;
320 int unsignedp, volatilep = 0;
322 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
323 &mode, &unsignedp, &volatilep, false);
324 if (bitsize != GET_MODE_BITSIZE (mode)
325 || (bitpos % BITS_PER_UNIT)
326 || (toffset && !tree_fits_shwi_p (toffset)))
327 decl = NULL;
328 else
330 offset += bitpos / BITS_PER_UNIT;
331 if (toffset)
332 offset += tree_to_shwi (toffset);
334 break;
338 if (decl
339 && mode == GET_MODE (x)
340 && TREE_CODE (decl) == VAR_DECL
341 && (TREE_STATIC (decl)
342 || DECL_THREAD_LOCAL_P (decl))
343 && DECL_RTL_SET_P (decl)
344 && MEM_P (DECL_RTL (decl)))
346 rtx newx;
348 offset += MEM_OFFSET (x);
350 newx = DECL_RTL (decl);
352 if (MEM_P (newx))
354 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
356 /* Avoid creating a new MEM needlessly if we already had
 357          the same address.  We do so if there's no OFFSET and the
358 old address X is identical to NEWX, or if X is of the
359 form (plus NEWX OFFSET), or the NEWX is of the form
360 (plus Y (const_int Z)) and X is that with the offset
361 added: (plus Y (const_int Z+OFFSET)). */
362 if (!((offset == 0
363 || (GET_CODE (o) == PLUS
364 && GET_CODE (XEXP (o, 1)) == CONST_INT
365 && (offset == INTVAL (XEXP (o, 1))
366 || (GET_CODE (n) == PLUS
367 && GET_CODE (XEXP (n, 1)) == CONST_INT
368 && (INTVAL (XEXP (n, 1)) + offset
369 == INTVAL (XEXP (o, 1)))
370 && (n = XEXP (n, 0))))
371 && (o = XEXP (o, 0))))
372 && rtx_equal_p (o, n)))
373 x = adjust_address_nv (newx, mode, offset);
375 else if (GET_MODE (x) == GET_MODE (newx)
376 && offset == 0)
377 x = newx;
381 return x;
384 /* Make a unary operation by first seeing if it folds and otherwise making
385 the specified operation. */
388 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
389 machine_mode op_mode)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
395 return tem;
397 return gen_rtx_fmt_e (code, mode, op);
400 /* Likewise for ternary operations. */
403 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
404 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
406 rtx tem;
408 /* If this simplifies, use it. */
409 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
410 op0, op1, op2)))
411 return tem;
413 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
416 /* Likewise, for relational operations.
417 CMP_MODE specifies mode comparison is done in. */
420 simplify_gen_relational (enum rtx_code code, machine_mode mode,
421 machine_mode cmp_mode, rtx op0, rtx op1)
423 rtx tem;
425 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
426 op0, op1)))
427 return tem;
429 return gen_rtx_fmt_ee (code, mode, op0, op1);
432 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
433 and simplify the result. If FN is non-NULL, call this callback on each
434 X, if it returns non-NULL, replace X with its return value and simplify the
435 result. */
438 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
439 rtx (*fn) (rtx, const_rtx, void *), void *data)
441 enum rtx_code code = GET_CODE (x);
442 machine_mode mode = GET_MODE (x);
443 machine_mode op_mode;
444 const char *fmt;
445 rtx op0, op1, op2, newx, op;
446 rtvec vec, newvec;
447 int i, j;
449 if (__builtin_expect (fn != NULL, 0))
451 newx = fn (x, old_rtx, data);
452 if (newx)
453 return newx;
455 else if (rtx_equal_p (x, old_rtx))
456 return copy_rtx ((rtx) data);
458 switch (GET_RTX_CLASS (code))
460 case RTX_UNARY:
461 op0 = XEXP (x, 0);
462 op_mode = GET_MODE (op0);
463 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
464 if (op0 == XEXP (x, 0))
465 return x;
466 return simplify_gen_unary (code, mode, op0, op_mode);
468 case RTX_BIN_ARITH:
469 case RTX_COMM_ARITH:
470 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
473 return x;
474 return simplify_gen_binary (code, mode, op0, op1);
476 case RTX_COMPARE:
477 case RTX_COMM_COMPARE:
478 op0 = XEXP (x, 0);
479 op1 = XEXP (x, 1);
480 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
483 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
484 return x;
485 return simplify_gen_relational (code, mode, op_mode, op0, op1);
487 case RTX_TERNARY:
488 case RTX_BITFIELD_OPS:
489 op0 = XEXP (x, 0);
490 op_mode = GET_MODE (op0);
491 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
494 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
495 return x;
496 if (op_mode == VOIDmode)
497 op_mode = GET_MODE (op0);
498 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
500 case RTX_EXTRA:
501 if (code == SUBREG)
503 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
504 if (op0 == SUBREG_REG (x))
505 return x;
506 op0 = simplify_gen_subreg (GET_MODE (x), op0,
507 GET_MODE (SUBREG_REG (x)),
508 SUBREG_BYTE (x));
509 return op0 ? op0 : x;
511 break;
513 case RTX_OBJ:
514 if (code == MEM)
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 if (op0 == XEXP (x, 0))
518 return x;
519 return replace_equiv_address_nv (x, op0);
521 else if (code == LO_SUM)
523 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
524 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
526 /* (lo_sum (high x) y) -> y where x and y have the same base. */
527 if (GET_CODE (op0) == HIGH)
529 rtx base0, base1, offset0, offset1;
530 split_const (XEXP (op0, 0), &base0, &offset0);
531 split_const (op1, &base1, &offset1);
532 if (rtx_equal_p (base0, base1))
533 return op1;
536 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
537 return x;
538 return gen_rtx_LO_SUM (mode, op0, op1);
540 break;
542 default:
543 break;
546 newx = x;
547 fmt = GET_RTX_FORMAT (code);
548 for (i = 0; fmt[i]; i++)
549 switch (fmt[i])
551 case 'E':
552 vec = XVEC (x, i);
553 newvec = XVEC (newx, i);
554 for (j = 0; j < GET_NUM_ELEM (vec); j++)
556 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
557 old_rtx, fn, data);
558 if (op != RTVEC_ELT (vec, j))
560 if (newvec == vec)
562 newvec = shallow_copy_rtvec (vec);
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XVEC (newx, i) = newvec;
567 RTVEC_ELT (newvec, j) = op;
570 break;
572 case 'e':
573 if (XEXP (x, i))
575 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
576 if (op != XEXP (x, i))
578 if (x == newx)
579 newx = shallow_copy_rtx (x);
580 XEXP (newx, i) = op;
583 break;
585 return newx;
588 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
589 resulting RTX. Return a new RTX which is as simplified as possible. */
592 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
594 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
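/* For example, replacing (reg:SI 100) with (const_int 2) throughout
   (plus:SI (reg:SI 100) (const_int 3)) via simplify_replace_rtx would
   fold the whole expression down to (const_int 5). */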
597 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
598 Only handle cases where the truncated value is inherently an rvalue.
600 RTL provides two ways of truncating a value:
602 1. a lowpart subreg. This form is only a truncation when both
603 the outer and inner modes (here MODE and OP_MODE respectively)
604 are scalar integers, and only then when the subreg is used as
605 an rvalue.
607 It is only valid to form such truncating subregs if the
608 truncation requires no action by the target. The onus for
609 proving this is on the creator of the subreg -- e.g. the
610 caller to simplify_subreg or simplify_gen_subreg -- and typically
611 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
613 2. a TRUNCATE. This form handles both scalar and compound integers.
615 The first form is preferred where valid. However, the TRUNCATE
616 handling in simplify_unary_operation turns the second form into the
617 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
618 so it is generally safe to form rvalue truncations using:
620 simplify_gen_unary (TRUNCATE, ...)
622 and leave simplify_unary_operation to work out which representation
623 should be used.
625 Because of the proof requirements on (1), simplify_truncation must
626 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
627 regardless of whether the outer truncation came from a SUBREG or a
628 TRUNCATE. For example, if the caller has proven that an SImode
629 truncation of:
631 (and:DI X Y)
633 is a no-op and can be represented as a subreg, it does not follow
634 that SImode truncations of X and Y are also no-ops. On a target
635 like 64-bit MIPS that requires SImode values to be stored in
636 sign-extended form, an SImode truncation of:
638 (and:DI (reg:DI X) (const_int 63))
640 is trivially a no-op because only the lower 6 bits can be set.
641 However, X is still an arbitrary 64-bit number and so we cannot
642 assume that truncating it too is a no-op. */
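/* As a concrete sketch of the two forms above: an SImode rvalue
   truncation of (reg:DI 100) can be written either as
   (subreg:SI (reg:DI 100) 0) -- assuming a little-endian target and a
   truncation already proven to be a no-op -- or as
   (truncate:SI (reg:DI 100)), and simplify_gen_unary (TRUNCATE, ...)
   chooses between the two representations. */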
644 static rtx
645 simplify_truncation (machine_mode mode, rtx op,
646 machine_mode op_mode)
648 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
649 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
650 gcc_assert (precision <= op_precision);
652 /* Optimize truncations of zero and sign extended values. */
653 if (GET_CODE (op) == ZERO_EXTEND
654 || GET_CODE (op) == SIGN_EXTEND)
656 /* There are three possibilities. If MODE is the same as the
657 origmode, we can omit both the extension and the subreg.
658 If MODE is not larger than the origmode, we can apply the
659 truncation without the extension. Finally, if the outermode
660 is larger than the origmode, we can just extend to the appropriate
661 mode. */
662 machine_mode origmode = GET_MODE (XEXP (op, 0));
663 if (mode == origmode)
664 return XEXP (op, 0);
665 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
666 return simplify_gen_unary (TRUNCATE, mode,
667 XEXP (op, 0), origmode);
668 else
669 return simplify_gen_unary (GET_CODE (op), mode,
670 XEXP (op, 0), origmode);
673 /* If the machine can perform operations in the truncated mode, distribute
674 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
675 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
676 if (1
677 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
678 && (GET_CODE (op) == PLUS
679 || GET_CODE (op) == MINUS
680 || GET_CODE (op) == MULT))
682 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
683 if (op0)
685 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
686 if (op1)
687 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
691 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 692      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 /* Ensure that OP_MODE is at least twice as wide as MODE
697 to avoid the possibility that an outer LSHIFTRT shifts by more
698 than the sign extension's sign_bit_copies and introduces zeros
699 into the high bits of the result. */
700 && 2 * precision <= op_precision
701 && CONST_INT_P (XEXP (op, 1))
702 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
704 && UINTVAL (XEXP (op, 1)) < precision)
705 return simplify_gen_binary (ASHIFTRT, mode,
706 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
708 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 709      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
710 the outer subreg is effectively a truncation to the original mode. */
711 if ((GET_CODE (op) == LSHIFTRT
712 || GET_CODE (op) == ASHIFTRT)
713 && CONST_INT_P (XEXP (op, 1))
714 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
716 && UINTVAL (XEXP (op, 1)) < precision)
717 return simplify_gen_binary (LSHIFTRT, mode,
718 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
720 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 721      (ashift:QI (x:QI) C), where C is a suitable small constant and
722 the outer subreg is effectively a truncation to the original mode. */
723 if (GET_CODE (op) == ASHIFT
724 && CONST_INT_P (XEXP (op, 1))
725 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
726 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
727 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
728 && UINTVAL (XEXP (op, 1)) < precision)
729 return simplify_gen_binary (ASHIFT, mode,
730 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
732 /* Recognize a word extraction from a multi-word subreg. */
733 if ((GET_CODE (op) == LSHIFTRT
734 || GET_CODE (op) == ASHIFTRT)
735 && SCALAR_INT_MODE_P (mode)
736 && SCALAR_INT_MODE_P (op_mode)
737 && precision >= BITS_PER_WORD
738 && 2 * precision <= op_precision
739 && CONST_INT_P (XEXP (op, 1))
740 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
741 && UINTVAL (XEXP (op, 1)) < op_precision)
743 int byte = subreg_lowpart_offset (mode, op_mode);
744 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
745 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
746 (WORDS_BIG_ENDIAN
747 ? byte - shifted_bytes
748 : byte + shifted_bytes));
751 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
752 and try replacing the TRUNCATE and shift with it. Don't do this
753 if the MEM has a mode-dependent address. */
754 if ((GET_CODE (op) == LSHIFTRT
755 || GET_CODE (op) == ASHIFTRT)
756 && SCALAR_INT_MODE_P (op_mode)
757 && MEM_P (XEXP (op, 0))
758 && CONST_INT_P (XEXP (op, 1))
759 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
760 && INTVAL (XEXP (op, 1)) > 0
761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
762 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
763 MEM_ADDR_SPACE (XEXP (op, 0)))
764 && ! MEM_VOLATILE_P (XEXP (op, 0))
765 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
766 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
768 int byte = subreg_lowpart_offset (mode, op_mode);
769 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
770 return adjust_address_nv (XEXP (op, 0), mode,
771 (WORDS_BIG_ENDIAN
772 ? byte - shifted_bytes
773 : byte + shifted_bytes));
776 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
777 (OP:SI foo:SI) if OP is NEG or ABS. */
778 if ((GET_CODE (op) == ABS
779 || GET_CODE (op) == NEG)
780 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
781 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
782 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
783 return simplify_gen_unary (GET_CODE (op), mode,
784 XEXP (XEXP (op, 0), 0), mode);
786 /* (truncate:A (subreg:B (truncate:C X) 0)) is
787 (truncate:A X). */
788 if (GET_CODE (op) == SUBREG
789 && SCALAR_INT_MODE_P (mode)
790 && SCALAR_INT_MODE_P (op_mode)
791 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
795 rtx inner = XEXP (SUBREG_REG (op), 0);
796 if (GET_MODE_PRECISION (mode)
797 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
798 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
799 else
800 /* If subreg above is paradoxical and C is narrower
801 than A, return (subreg:A (truncate:C X) 0). */
802 return simplify_gen_subreg (mode, SUBREG_REG (op),
803 GET_MODE (SUBREG_REG (op)), 0);
806 /* (truncate:A (truncate:B X)) is (truncate:A X). */
807 if (GET_CODE (op) == TRUNCATE)
808 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
809 GET_MODE (XEXP (op, 0)));
811 return NULL_RTX;
814 /* Try to simplify a unary operation CODE whose output mode is to be
815 MODE with input operand OP whose mode was originally OP_MODE.
816 Return zero if no simplification can be made. */
818 simplify_unary_operation (enum rtx_code code, machine_mode mode,
819 rtx op, machine_mode op_mode)
821 rtx trueop, tem;
823 trueop = avoid_constant_pool_reference (op);
825 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
826 if (tem)
827 return tem;
829 return simplify_unary_operation_1 (code, mode, op);
832 /* Perform some simplifications we can do even if the operands
833 aren't constant. */
834 static rtx
835 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
837 enum rtx_code reversed;
838 rtx temp;
840 switch (code)
842 case NOT:
843 /* (not (not X)) == X. */
844 if (GET_CODE (op) == NOT)
845 return XEXP (op, 0);
847 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
848 comparison is all ones. */
849 if (COMPARISON_P (op)
850 && (mode == BImode || STORE_FLAG_VALUE == -1)
851 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
852 return simplify_gen_relational (reversed, mode, VOIDmode,
853 XEXP (op, 0), XEXP (op, 1));
855 /* (not (plus X -1)) can become (neg X). */
856 if (GET_CODE (op) == PLUS
857 && XEXP (op, 1) == constm1_rtx)
858 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
860 /* Similarly, (not (neg X)) is (plus X -1). */
861 if (GET_CODE (op) == NEG)
862 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
863 CONSTM1_RTX (mode));
865 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
866 if (GET_CODE (op) == XOR
867 && CONST_INT_P (XEXP (op, 1))
868 && (temp = simplify_unary_operation (NOT, mode,
869 XEXP (op, 1), mode)) != 0)
870 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
872 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
873 if (GET_CODE (op) == PLUS
874 && CONST_INT_P (XEXP (op, 1))
875 && mode_signbit_p (mode, XEXP (op, 1))
876 && (temp = simplify_unary_operation (NOT, mode,
877 XEXP (op, 1), mode)) != 0)
878 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
881 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
882 operands other than 1, but that is not valid. We could do a
883 similar simplification for (not (lshiftrt C X)) where C is
884 just the sign bit, but this doesn't seem common enough to
885 bother with. */
886 if (GET_CODE (op) == ASHIFT
887 && XEXP (op, 0) == const1_rtx)
889 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
890 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
893 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
894 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
895 so we can perform the above simplification. */
896 if (STORE_FLAG_VALUE == -1
897 && GET_CODE (op) == ASHIFTRT
898 && CONST_INT_P (XEXP (op, 1))
899 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
900 return simplify_gen_relational (GE, mode, VOIDmode,
901 XEXP (op, 0), const0_rtx);
904 if (GET_CODE (op) == SUBREG
905 && subreg_lowpart_p (op)
906 && (GET_MODE_SIZE (GET_MODE (op))
907 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
908 && GET_CODE (SUBREG_REG (op)) == ASHIFT
909 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
911 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
912 rtx x;
914 x = gen_rtx_ROTATE (inner_mode,
915 simplify_gen_unary (NOT, inner_mode, const1_rtx,
916 inner_mode),
917 XEXP (SUBREG_REG (op), 1));
918 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
919 if (temp)
920 return temp;
923 /* Apply De Morgan's laws to reduce number of patterns for machines
924 with negating logical insns (and-not, nand, etc.). If result has
925 only one NOT, put it first, since that is how the patterns are
926 coded. */
927 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
929 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
930 machine_mode op_mode;
932 op_mode = GET_MODE (in1);
933 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
935 op_mode = GET_MODE (in2);
936 if (op_mode == VOIDmode)
937 op_mode = mode;
938 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
940 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
941 std::swap (in1, in2);
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
953 break;
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
960 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
961 If comparison is not reversible use
962 x ? y : (neg y). */
963 if (GET_CODE (op) == IF_THEN_ELSE)
965 rtx cond = XEXP (op, 0);
966 rtx true_rtx = XEXP (op, 1);
967 rtx false_rtx = XEXP (op, 2);
969 if ((GET_CODE (true_rtx) == NEG
970 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
971 || (GET_CODE (false_rtx) == NEG
972 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
974 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
975 temp = reversed_comparison (cond, mode);
976 else
978 temp = cond;
979 std::swap (true_rtx, false_rtx);
981 return simplify_gen_ternary (IF_THEN_ELSE, mode,
982 mode, temp, true_rtx, false_rtx);
986 /* (neg (plus X 1)) can become (not X). */
987 if (GET_CODE (op) == PLUS
988 && XEXP (op, 1) == const1_rtx)
989 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
991 /* Similarly, (neg (not X)) is (plus X 1). */
992 if (GET_CODE (op) == NOT)
993 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
994 CONST1_RTX (mode));
996 /* (neg (minus X Y)) can become (minus Y X). This transformation
997 isn't safe for modes with signed zeros, since if X and Y are
998 both +0, (minus Y X) is the same as (minus X Y). If the
999 rounding mode is towards +infinity (or -infinity) then the two
1000 expressions will be rounded differently. */
1001 if (GET_CODE (op) == MINUS
1002 && !HONOR_SIGNED_ZEROS (mode)
1003 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1004 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1006 if (GET_CODE (op) == PLUS
1007 && !HONOR_SIGNED_ZEROS (mode)
1008 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1010 /* (neg (plus A C)) is simplified to (minus -C A). */
1011 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1012 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1014 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1015 if (temp)
1016 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1019 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1020 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1021 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1024 /* (neg (mult A B)) becomes (mult A (neg B)).
1025 This works even for floating-point values. */
1026 if (GET_CODE (op) == MULT
1027 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1029 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1030 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1033 /* NEG commutes with ASHIFT since it is multiplication. Only do
1034 this if we can then eliminate the NEG (e.g., if the operand
1035 is a constant). */
1036 if (GET_CODE (op) == ASHIFT)
1038 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1039 if (temp)
1040 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1043 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1044 C is equal to the width of MODE minus 1. */
1045 if (GET_CODE (op) == ASHIFTRT
1046 && CONST_INT_P (XEXP (op, 1))
1047 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1048 return simplify_gen_binary (LSHIFTRT, mode,
1049 XEXP (op, 0), XEXP (op, 1));
1051 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1052 C is equal to the width of MODE minus 1. */
1053 if (GET_CODE (op) == LSHIFTRT
1054 && CONST_INT_P (XEXP (op, 1))
1055 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1056 return simplify_gen_binary (ASHIFTRT, mode,
1057 XEXP (op, 0), XEXP (op, 1));
1059 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1060 if (GET_CODE (op) == XOR
1061 && XEXP (op, 1) == const1_rtx
1062 && nonzero_bits (XEXP (op, 0), mode) == 1)
1063 return plus_constant (mode, XEXP (op, 0), -1);
1065 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1066 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1067 if (GET_CODE (op) == LT
1068 && XEXP (op, 1) == const0_rtx
1069 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1071 machine_mode inner = GET_MODE (XEXP (op, 0));
1072 int isize = GET_MODE_PRECISION (inner);
1073 if (STORE_FLAG_VALUE == 1)
1075 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1076 GEN_INT (isize - 1));
1077 if (mode == inner)
1078 return temp;
1079 if (GET_MODE_PRECISION (mode) > isize)
1080 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1081 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1083 else if (STORE_FLAG_VALUE == -1)
1085 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1086 GEN_INT (isize - 1));
1087 if (mode == inner)
1088 return temp;
1089 if (GET_MODE_PRECISION (mode) > isize)
1090 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1091 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1094 break;
1096 case TRUNCATE:
1097 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1098 with the umulXi3_highpart patterns. */
1099 if (GET_CODE (op) == LSHIFTRT
1100 && GET_CODE (XEXP (op, 0)) == MULT)
1101 break;
1103 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1105 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1107 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1108 if (temp)
1109 return temp;
1111 /* We can't handle truncation to a partial integer mode here
1112 because we don't know the real bitsize of the partial
1113 integer mode. */
1114 break;
1117 if (GET_MODE (op) != VOIDmode)
1119 temp = simplify_truncation (mode, op, GET_MODE (op));
1120 if (temp)
1121 return temp;
1124 /* If we know that the value is already truncated, we can
1125 replace the TRUNCATE with a SUBREG. */
1126 if (GET_MODE_NUNITS (mode) == 1
1127 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1128 || truncated_to_mode (mode, op)))
1130 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1131 if (temp)
1132 return temp;
1135 /* A truncate of a comparison can be replaced with a subreg if
1136 STORE_FLAG_VALUE permits. This is like the previous test,
1137 but it works even if the comparison is done in a mode larger
1138 than HOST_BITS_PER_WIDE_INT. */
1139 if (HWI_COMPUTABLE_MODE_P (mode)
1140 && COMPARISON_P (op)
1141 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1143 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1144 if (temp)
1145 return temp;
1148 /* A truncate of a memory is just loading the low part of the memory
1149 if we are not changing the meaning of the address. */
1150 if (GET_CODE (op) == MEM
1151 && !VECTOR_MODE_P (mode)
1152 && !MEM_VOLATILE_P (op)
1153 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1155 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1156 if (temp)
1157 return temp;
1160 break;
1162 case FLOAT_TRUNCATE:
1163 if (DECIMAL_FLOAT_MODE_P (mode))
1164 break;
1166 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1167 if (GET_CODE (op) == FLOAT_EXTEND
1168 && GET_MODE (XEXP (op, 0)) == mode)
1169 return XEXP (op, 0);
1171 /* (float_truncate:SF (float_truncate:DF foo:XF))
1172 = (float_truncate:SF foo:XF).
1173 This may eliminate double rounding, so it is unsafe.
1175 (float_truncate:SF (float_extend:XF foo:DF))
1176 = (float_truncate:SF foo:DF).
1178 (float_truncate:DF (float_extend:XF foo:SF))
1179 = (float_extend:DF foo:SF). */
1180 if ((GET_CODE (op) == FLOAT_TRUNCATE
1181 && flag_unsafe_math_optimizations)
1182 || GET_CODE (op) == FLOAT_EXTEND)
1183 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1184 0)))
1185 > GET_MODE_SIZE (mode)
1186 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1187 mode,
1188 XEXP (op, 0), mode);
1190 /* (float_truncate (float x)) is (float x) */
1191 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1192 && (flag_unsafe_math_optimizations
1193 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1194 && ((unsigned)significand_size (GET_MODE (op))
1195 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1196 - num_sign_bit_copies (XEXP (op, 0),
1197 GET_MODE (XEXP (op, 0))))))))
1198 return simplify_gen_unary (GET_CODE (op), mode,
1199 XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)));
1202 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1203 (OP:SF foo:SF) if OP is NEG or ABS. */
1204 if ((GET_CODE (op) == ABS
1205 || GET_CODE (op) == NEG)
1206 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1208 return simplify_gen_unary (GET_CODE (op), mode,
1209 XEXP (XEXP (op, 0), 0), mode);
1211 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1212 is (float_truncate:SF x). */
1213 if (GET_CODE (op) == SUBREG
1214 && subreg_lowpart_p (op)
1215 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1216 return SUBREG_REG (op);
1217 break;
1219 case FLOAT_EXTEND:
1220 if (DECIMAL_FLOAT_MODE_P (mode))
1221 break;
1223 /* (float_extend (float_extend x)) is (float_extend x)
1225 (float_extend (float x)) is (float x) assuming that double
1226          rounding can't happen.  */
1228 if (GET_CODE (op) == FLOAT_EXTEND
1229 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1230 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1231 && ((unsigned)significand_size (GET_MODE (op))
1232 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1233 - num_sign_bit_copies (XEXP (op, 0),
1234 GET_MODE (XEXP (op, 0)))))))
1235 return simplify_gen_unary (GET_CODE (op), mode,
1236 XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1239 break;
1241 case ABS:
1242 /* (abs (neg <foo>)) -> (abs <foo>) */
1243 if (GET_CODE (op) == NEG)
1244 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1247 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1248 do nothing. */
1249 if (GET_MODE (op) == VOIDmode)
1250 break;
1252 /* If operand is something known to be positive, ignore the ABS. */
1253 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1254 || val_signbit_known_clear_p (GET_MODE (op),
1255 nonzero_bits (op, GET_MODE (op))))
1256 return op;
1258 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1259 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1260 return gen_rtx_NEG (mode, op);
1262 break;
1264 case FFS:
1265 /* (ffs (*_extend <X>)) = (ffs <X>) */
1266 if (GET_CODE (op) == SIGN_EXTEND
1267 || GET_CODE (op) == ZERO_EXTEND)
1268 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1269 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case POPCOUNT:
1273 switch (GET_CODE (op))
1275 case BSWAP:
1276 case ZERO_EXTEND:
1277 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1278 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1279 GET_MODE (XEXP (op, 0)));
1281 case ROTATE:
1282 case ROTATERT:
1283 /* Rotations don't affect popcount. */
1284 if (!side_effects_p (XEXP (op, 1)))
1285 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1286 GET_MODE (XEXP (op, 0)));
1287 break;
1289 default:
1290 break;
1292 break;
1294 case PARITY:
1295 switch (GET_CODE (op))
1297 case NOT:
1298 case BSWAP:
1299 case ZERO_EXTEND:
1300 case SIGN_EXTEND:
1301 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1302 GET_MODE (XEXP (op, 0)));
1304 case ROTATE:
1305 case ROTATERT:
1306 /* Rotations don't affect parity. */
1307 if (!side_effects_p (XEXP (op, 1)))
1308 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1309 GET_MODE (XEXP (op, 0)));
1310 break;
1312 default:
1313 break;
1315 break;
1317 case BSWAP:
1318 /* (bswap (bswap x)) -> x. */
1319 if (GET_CODE (op) == BSWAP)
1320 return XEXP (op, 0);
1321 break;
1323 case FLOAT:
1324 /* (float (sign_extend <X>)) = (float <X>). */
1325 if (GET_CODE (op) == SIGN_EXTEND)
1326 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1327 GET_MODE (XEXP (op, 0)));
1328 break;
1330 case SIGN_EXTEND:
1331 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1332 becomes just the MINUS if its mode is MODE. This allows
1333 folding switch statements on machines using casesi (such as
1334 the VAX). */
1335 if (GET_CODE (op) == TRUNCATE
1336 && GET_MODE (XEXP (op, 0)) == mode
1337 && GET_CODE (XEXP (op, 0)) == MINUS
1338 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1339 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1340 return XEXP (op, 0);
1342 /* Extending a widening multiplication should be canonicalized to
1343 a wider widening multiplication. */
1344 if (GET_CODE (op) == MULT)
1346 rtx lhs = XEXP (op, 0);
1347 rtx rhs = XEXP (op, 1);
1348 enum rtx_code lcode = GET_CODE (lhs);
1349 enum rtx_code rcode = GET_CODE (rhs);
1351 /* Widening multiplies usually extend both operands, but sometimes
1352 they use a shift to extract a portion of a register. */
1353 if ((lcode == SIGN_EXTEND
1354 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1355 && (rcode == SIGN_EXTEND
1356 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1358 machine_mode lmode = GET_MODE (lhs);
1359 machine_mode rmode = GET_MODE (rhs);
1360 int bits;
1362 if (lcode == ASHIFTRT)
1363 /* Number of bits not shifted off the end. */
1364 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1365 else /* lcode == SIGN_EXTEND */
1366 /* Size of inner mode. */
1367 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1369 if (rcode == ASHIFTRT)
1370 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1371 else /* rcode == SIGN_EXTEND */
1372 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1374          /* We can only widen multiplies if the result is mathematically
1375 equivalent. I.e. if overflow was impossible. */
1376 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1377 return simplify_gen_binary
1378 (MULT, mode,
1379 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1380 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1384 /* Check for a sign extension of a subreg of a promoted
1385 variable, where the promotion is sign-extended, and the
1386 target mode is the same as the variable's promotion. */
1387 if (GET_CODE (op) == SUBREG
1388 && SUBREG_PROMOTED_VAR_P (op)
1389 && SUBREG_PROMOTED_SIGNED_P (op)
1390 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1392 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1393 if (temp)
1394 return temp;
1397 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1398 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1399 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1401 gcc_assert (GET_MODE_PRECISION (mode)
1402 > GET_MODE_PRECISION (GET_MODE (op)));
1403 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1404 GET_MODE (XEXP (op, 0)));
1407 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1408 is (sign_extend:M (subreg:O <X>)) if there is mode with
1409 GET_MODE_BITSIZE (N) - I bits.
1410 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1411 is similarly (zero_extend:M (subreg:O <X>)). */
1412 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1413 && GET_CODE (XEXP (op, 0)) == ASHIFT
1414 && CONST_INT_P (XEXP (op, 1))
1415 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1416 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1418 machine_mode tmode
1419 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1420 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1421 gcc_assert (GET_MODE_BITSIZE (mode)
1422 > GET_MODE_BITSIZE (GET_MODE (op)));
1423 if (tmode != BLKmode)
1425 rtx inner =
1426 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1427 if (inner)
1428 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1429 ? SIGN_EXTEND : ZERO_EXTEND,
1430 mode, inner, tmode);
1434 #if defined(POINTERS_EXTEND_UNSIGNED)
1435 /* As we do not know which address space the pointer is referring to,
1436 we can do this only if the target does not support different pointer
1437 or address modes depending on the address space. */
1438 if (target_default_pointer_address_modes_p ()
1439 && ! POINTERS_EXTEND_UNSIGNED
1440 && mode == Pmode && GET_MODE (op) == ptr_mode
1441 && (CONSTANT_P (op)
1442 || (GET_CODE (op) == SUBREG
1443 && REG_P (SUBREG_REG (op))
1444 && REG_POINTER (SUBREG_REG (op))
1445 && GET_MODE (SUBREG_REG (op)) == Pmode))
1446 && !targetm.have_ptr_extend ())
1447 return convert_memory_address (Pmode, op);
1448 #endif
1449 break;
1451 case ZERO_EXTEND:
1452 /* Check for a zero extension of a subreg of a promoted
1453 variable, where the promotion is zero-extended, and the
1454 target mode is the same as the variable's promotion. */
1455 if (GET_CODE (op) == SUBREG
1456 && SUBREG_PROMOTED_VAR_P (op)
1457 && SUBREG_PROMOTED_UNSIGNED_P (op)
1458 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1460 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1461 if (temp)
1462 return temp;
1465 /* Extending a widening multiplication should be canonicalized to
1466 a wider widening multiplication. */
1467 if (GET_CODE (op) == MULT)
1469 rtx lhs = XEXP (op, 0);
1470 rtx rhs = XEXP (op, 1);
1471 enum rtx_code lcode = GET_CODE (lhs);
1472 enum rtx_code rcode = GET_CODE (rhs);
1474 /* Widening multiplies usually extend both operands, but sometimes
1475 they use a shift to extract a portion of a register. */
1476 if ((lcode == ZERO_EXTEND
1477 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1478 && (rcode == ZERO_EXTEND
1479 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1481 machine_mode lmode = GET_MODE (lhs);
1482 machine_mode rmode = GET_MODE (rhs);
1483 int bits;
1485 if (lcode == LSHIFTRT)
1486 /* Number of bits not shifted off the end. */
1487 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1488 else /* lcode == ZERO_EXTEND */
1489 /* Size of inner mode. */
1490 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1492 if (rcode == LSHIFTRT)
1493 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1494 else /* rcode == ZERO_EXTEND */
1495 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1497          /* We can only widen multiplies if the result is mathematically
1498 equivalent. I.e. if overflow was impossible. */
1499 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1500 return simplify_gen_binary
1501 (MULT, mode,
1502 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1503 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1507 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1508 if (GET_CODE (op) == ZERO_EXTEND)
1509 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1510 GET_MODE (XEXP (op, 0)));
1512 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1513 is (zero_extend:M (subreg:O <X>)) if there is mode with
1514 GET_MODE_PRECISION (N) - I bits. */
1515 if (GET_CODE (op) == LSHIFTRT
1516 && GET_CODE (XEXP (op, 0)) == ASHIFT
1517 && CONST_INT_P (XEXP (op, 1))
1518 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1519 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1521 machine_mode tmode
1522 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1523 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1524 if (tmode != BLKmode)
1526 rtx inner =
1527 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1528 if (inner)
1529 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1533 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1534 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1535 of mode N. E.g.
1536 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1537 (and:SI (reg:SI) (const_int 63)). */
1538 if (GET_CODE (op) == SUBREG
1539 && GET_MODE_PRECISION (GET_MODE (op))
1540 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1541 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1542 <= HOST_BITS_PER_WIDE_INT
1543 && GET_MODE_PRECISION (mode)
1544 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1545 && subreg_lowpart_p (op)
1546 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1547 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1549 if (GET_MODE_PRECISION (mode)
1550 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1551 return SUBREG_REG (op);
1552 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1553 GET_MODE (SUBREG_REG (op)));
1556 #if defined(POINTERS_EXTEND_UNSIGNED)
1557 /* As we do not know which address space the pointer is referring to,
1558 we can do this only if the target does not support different pointer
1559 or address modes depending on the address space. */
1560 if (target_default_pointer_address_modes_p ()
1561 && POINTERS_EXTEND_UNSIGNED > 0
1562 && mode == Pmode && GET_MODE (op) == ptr_mode
1563 && (CONSTANT_P (op)
1564 || (GET_CODE (op) == SUBREG
1565 && REG_P (SUBREG_REG (op))
1566 && REG_POINTER (SUBREG_REG (op))
1567 && GET_MODE (SUBREG_REG (op)) == Pmode))
1568 && !targetm.have_ptr_extend ())
1569 return convert_memory_address (Pmode, op);
1570 #endif
1571 break;
1573 default:
1574 break;
1577 return 0;
1580 /* Try to compute the value of a unary operation CODE whose output mode is to
1581 be MODE with input operand OP whose mode was originally OP_MODE.
1582 Return zero if the value cannot be computed. */
1584 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1585 rtx op, machine_mode op_mode)
1587 unsigned int width = GET_MODE_PRECISION (mode);
1589 if (code == VEC_DUPLICATE)
1591 gcc_assert (VECTOR_MODE_P (mode));
1592 if (GET_MODE (op) != VOIDmode)
1594 if (!VECTOR_MODE_P (GET_MODE (op)))
1595 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1596 else
1597 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1598 (GET_MODE (op)));
1600 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1601 || GET_CODE (op) == CONST_VECTOR)
1603 int elt_size = GET_MODE_UNIT_SIZE (mode);
1604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1608 if (GET_CODE (op) != CONST_VECTOR)
1609 for (i = 0; i < n_elts; i++)
1610 RTVEC_ELT (v, i) = op;
1611 else
1613 machine_mode inmode = GET_MODE (op);
1614 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1615 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1617 gcc_assert (in_n_elts < n_elts);
1618 gcc_assert ((n_elts % in_n_elts) == 0);
1619 for (i = 0; i < n_elts; i++)
1620 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1622 return gen_rtx_CONST_VECTOR (mode, v);
1626 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1628 int elt_size = GET_MODE_UNIT_SIZE (mode);
1629 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1630 machine_mode opmode = GET_MODE (op);
1631 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1632 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1633 rtvec v = rtvec_alloc (n_elts);
1634 unsigned int i;
1636 gcc_assert (op_n_elts == n_elts);
1637 for (i = 0; i < n_elts; i++)
1639 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1640 CONST_VECTOR_ELT (op, i),
1641 GET_MODE_INNER (opmode));
1642 if (!x)
1643 return 0;
1644 RTVEC_ELT (v, i) = x;
1646 return gen_rtx_CONST_VECTOR (mode, v);
1649 /* The order of these tests is critical so that, for example, we don't
1650 check the wrong mode (input vs. output) for a conversion operation,
1651 such as FIX. At some point, this should be simplified. */
1653 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1655 REAL_VALUE_TYPE d;
1657 if (op_mode == VOIDmode)
1659          /* CONST_INTs have VOIDmode as the mode.  We assume that all
1660             the bits of the constant are significant, though this is
1661             a dangerous assumption, as many times CONST_INTs are
1662 created and used with garbage in the bits outside of the
1663 precision of the implied mode of the const_int. */
1664 op_mode = MAX_MODE_INT;
1667 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1668 d = real_value_truncate (mode, d);
1669 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1671 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1673 REAL_VALUE_TYPE d;
1675 if (op_mode == VOIDmode)
1677          /* CONST_INTs have VOIDmode as the mode.  We assume that all
1678             the bits of the constant are significant, though this is
1679             a dangerous assumption, as many times CONST_INTs are
1680 created and used with garbage in the bits outside of the
1681 precision of the implied mode of the const_int. */
1682 op_mode = MAX_MODE_INT;
1685 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1686 d = real_value_truncate (mode, d);
1687 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1690 if (CONST_SCALAR_INT_P (op) && width > 0)
1692 wide_int result;
1693 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1694 rtx_mode_t op0 = std::make_pair (op, imode);
1695 int int_value;
1697 #if TARGET_SUPPORTS_WIDE_INT == 0
1698 /* This assert keeps the simplification from producing a result
1699          that cannot be represented in a CONST_DOUBLE, but a lot of
1700          upstream callers expect that this function never fails to
1701          simplify something, and so if you added this to the test
1702          above, the code would die later anyway.  If this assert
1703 happens, you just need to make the port support wide int. */
1704 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1705 #endif
1707 switch (code)
1709 case NOT:
1710 result = wi::bit_not (op0);
1711 break;
1713 case NEG:
1714 result = wi::neg (op0);
1715 break;
1717 case ABS:
1718 result = wi::abs (op0);
1719 break;
1721 case FFS:
1722 result = wi::shwi (wi::ffs (op0), mode);
1723 break;
1725 case CLZ:
1726 if (wi::ne_p (op0, 0))
1727 int_value = wi::clz (op0);
1728 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1729 int_value = GET_MODE_PRECISION (mode);
1730 result = wi::shwi (int_value, mode);
1731 break;
1733 case CLRSB:
1734 result = wi::shwi (wi::clrsb (op0), mode);
1735 break;
1737 case CTZ:
1738 if (wi::ne_p (op0, 0))
1739 int_value = wi::ctz (op0);
1740 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1741 int_value = GET_MODE_PRECISION (mode);
1742 result = wi::shwi (int_value, mode);
1743 break;
1745 case POPCOUNT:
1746 result = wi::shwi (wi::popcount (op0), mode);
1747 break;
1749 case PARITY:
1750 result = wi::shwi (wi::parity (op0), mode);
1751 break;
1753 case BSWAP:
1754 result = wide_int (op0).bswap ();
1755 break;
1757 case TRUNCATE:
1758 case ZERO_EXTEND:
1759 result = wide_int::from (op0, width, UNSIGNED);
1760 break;
1762 case SIGN_EXTEND:
1763 result = wide_int::from (op0, width, SIGNED);
1764 break;
1766 case SQRT:
1767 default:
1768 return 0;
1771 return immed_wide_int_const (result, mode);
1774 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1775 && SCALAR_FLOAT_MODE_P (mode)
1776 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1778 REAL_VALUE_TYPE d;
1779 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1781 switch (code)
1783 case SQRT:
1784 return 0;
1785 case ABS:
1786 d = real_value_abs (&d);
1787 break;
1788 case NEG:
1789 d = real_value_negate (&d);
1790 break;
1791 case FLOAT_TRUNCATE:
1792 d = real_value_truncate (mode, d);
1793 break;
1794 case FLOAT_EXTEND:
1795 /* All this does is change the mode, unless changing
1796 mode class. */
1797 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1798 real_convert (&d, mode, &d);
1799 break;
1800 case FIX:
1801 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1802 break;
1803 case NOT:
1805 long tmp[4];
1806 int i;
1808 real_to_target (tmp, &d, GET_MODE (op));
1809 for (i = 0; i < 4; i++)
1810 tmp[i] = ~tmp[i];
1811 real_from_target (&d, tmp, mode);
1812 break;
1814 default:
1815 gcc_unreachable ();
1817 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1819 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1820 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1821 && GET_MODE_CLASS (mode) == MODE_INT
1822 && width > 0)
1824 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1825 operators are intentionally left unspecified (to ease implementation
1826 by target backends), for consistency, this routine implements the
1827 same semantics for constant folding as used by the middle-end. */
1829 /* This was formerly used only for non-IEEE float.
1830 eggert@twinsun.com says it is safe for IEEE also. */
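/* For example, with SImode as the target mode, constant folding
   (fix:SI 4e9) saturates to the signed maximum 0x7fffffff because
   4e9 exceeds it, while (unsigned_fix:SI 4e9) folds to 4000000000,
   which still fits in the unsigned range; NaN inputs fold to zero
   in both cases. */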
1831 REAL_VALUE_TYPE x, t;
1832 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1833 wide_int wmax, wmin;
1834 /* This is part of the ABI of real_to_integer, but we check
1835 things before making this call. */
1836 bool fail;
1838 switch (code)
1840 case FIX:
1841 if (REAL_VALUE_ISNAN (x))
1842 return const0_rtx;
1844 /* Test against the signed upper bound. */
1845 wmax = wi::max_value (width, SIGNED);
1846 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1847 if (REAL_VALUES_LESS (t, x))
1848 return immed_wide_int_const (wmax, mode);
1850 /* Test against the signed lower bound. */
1851 wmin = wi::min_value (width, SIGNED);
1852 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1853 if (REAL_VALUES_LESS (x, t))
1854 return immed_wide_int_const (wmin, mode);
1856 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1857 break;
1859 case UNSIGNED_FIX:
1860 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1861 return const0_rtx;
1863 /* Test against the unsigned upper bound. */
1864 wmax = wi::max_value (width, UNSIGNED);
1865 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1866 if (REAL_VALUES_LESS (t, x))
1867 return immed_wide_int_const (wmax, mode);
1869 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1870 mode);
1871 break;
1873 default:
1874 gcc_unreachable ();
1878 return NULL_RTX;
1881 /* Subroutine of simplify_binary_operation to simplify a binary operation
1882 CODE that can commute with byte swapping, with result mode MODE and
1883 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1884 Return zero if no simplification or canonicalization is possible. */
1886 static rtx
1887 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1888 rtx op0, rtx op1)
1890 rtx tem;
1892 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
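/* For instance, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))): byte-swapping the constant
   lets the AND be applied to x before the byte swap. */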
1893 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1895 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1896 simplify_gen_unary (BSWAP, mode, op1, mode));
1897 return simplify_gen_unary (BSWAP, mode, tem, mode);
1900 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1901 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1903 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1904 return simplify_gen_unary (BSWAP, mode, tem, mode);
1907 return NULL_RTX;
1910 /* Subroutine of simplify_binary_operation to simplify a commutative,
1911 associative binary operation CODE with result mode MODE, operating
1912 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1913 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1914 canonicalization is possible. */
1916 static rtx
1917 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1918 rtx op0, rtx op1)
1920 rtx tem;
1922 /* Linearize the operator to the left. */
1923 if (GET_CODE (op1) == code)
1925 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1926 if (GET_CODE (op0) == code)
1928 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1929 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1932 /* "a op (b op c)" becomes "(b op c) op a". */
1933 if (! swap_commutative_operands_p (op1, op0))
1934 return simplify_gen_binary (code, mode, op1, op0);
1936 std::swap (op0, op1);
1939 if (GET_CODE (op0) == code)
1941 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1942 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1944 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1945 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1948 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1949 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1950 if (tem != 0)
1951 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1953 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1954 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1955 if (tem != 0)
1956 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1959 return 0;
1963 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1964 and OP1. Return 0 if no simplification is possible.
1966 Don't use this for relational operations such as EQ or LT.
1967 Use simplify_relational_operation instead. */
1969 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1970 rtx op0, rtx op1)
1972 rtx trueop0, trueop1;
1973 rtx tem;
1975 /* Relational operations don't work here. We must know the mode
1976 of the operands in order to do the comparison correctly.
1977 Assuming a full word can give incorrect results.
1978 Consider comparing 128 with -128 in QImode. */
1979 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1980 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1982 /* Make sure the constant is second. */
1983 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1984 && swap_commutative_operands_p (op0, op1))
1985 std::swap (op0, op1);
1987 trueop0 = avoid_constant_pool_reference (op0);
1988 trueop1 = avoid_constant_pool_reference (op1);
1990 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1991 if (tem)
1992 return tem;
1993 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1996 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1997 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1998 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1999 actual constants. */
2001 static rtx
2002 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2003 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2005 rtx tem, reversed, opleft, opright;
2006 HOST_WIDE_INT val;
2007 unsigned int width = GET_MODE_PRECISION (mode);
2009 /* Even if we can't compute a constant result,
2010 there are some cases worth simplifying. */
2012 switch (code)
2014 case PLUS:
2015 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2016 when x is NaN, infinite, or finite and nonzero. They aren't
2017 when x is -0 and the rounding mode is not towards -infinity,
2018 since (-0) + 0 is then 0. */
2019 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2020 return op0;
2022 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2023 transformations are safe even for IEEE. */
2024 if (GET_CODE (op0) == NEG)
2025 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2026 else if (GET_CODE (op1) == NEG)
2027 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2029 /* (~a) + 1 -> -a */
2030 if (INTEGRAL_MODE_P (mode)
2031 && GET_CODE (op0) == NOT
2032 && trueop1 == const1_rtx)
2033 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2035 /* Handle both-operands-constant cases. We can only add
2036 CONST_INTs to constants since the sum of relocatable symbols
2037 can't be handled by most assemblers. Don't add CONST_INT
2038 to CONST_INT since overflow won't be computed properly if wider
2039 than HOST_BITS_PER_WIDE_INT. */
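/* E.g. (plus (symbol_ref foo) (const_int 4)) is folded by
   plus_constant into the single expression
   (const (plus (symbol_ref foo) (const_int 4))). */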
2041 if ((GET_CODE (op0) == CONST
2042 || GET_CODE (op0) == SYMBOL_REF
2043 || GET_CODE (op0) == LABEL_REF)
2044 && CONST_INT_P (op1))
2045 return plus_constant (mode, op0, INTVAL (op1));
2046 else if ((GET_CODE (op1) == CONST
2047 || GET_CODE (op1) == SYMBOL_REF
2048 || GET_CODE (op1) == LABEL_REF)
2049 && CONST_INT_P (op0))
2050 return plus_constant (mode, op1, INTVAL (op0));
2052 /* See if this is something like X * C - X or vice versa or
2053 if the multiplication is written as a shift. If so, we can
2054 distribute and make a new multiply, shift, or maybe just
2055 have X (if C is 2 in the example above). But don't make
2056 something more expensive than we had before. */
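/* For example, (plus (mult x (const_int 3)) x) becomes
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   becomes (mult x (const_int 5)), provided the new form is no more
   expensive than the original. */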
2058 if (SCALAR_INT_MODE_P (mode))
2060 rtx lhs = op0, rhs = op1;
2062 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2063 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2065 if (GET_CODE (lhs) == NEG)
2067 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2068 lhs = XEXP (lhs, 0);
2070 else if (GET_CODE (lhs) == MULT
2071 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2073 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2074 lhs = XEXP (lhs, 0);
2076 else if (GET_CODE (lhs) == ASHIFT
2077 && CONST_INT_P (XEXP (lhs, 1))
2078 && INTVAL (XEXP (lhs, 1)) >= 0
2079 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2081 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2082 GET_MODE_PRECISION (mode));
2083 lhs = XEXP (lhs, 0);
2086 if (GET_CODE (rhs) == NEG)
2088 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2089 rhs = XEXP (rhs, 0);
2091 else if (GET_CODE (rhs) == MULT
2092 && CONST_INT_P (XEXP (rhs, 1)))
2094 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2095 rhs = XEXP (rhs, 0);
2097 else if (GET_CODE (rhs) == ASHIFT
2098 && CONST_INT_P (XEXP (rhs, 1))
2099 && INTVAL (XEXP (rhs, 1)) >= 0
2100 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2102 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2103 GET_MODE_PRECISION (mode));
2104 rhs = XEXP (rhs, 0);
2107 if (rtx_equal_p (lhs, rhs))
2109 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2110 rtx coeff;
2111 bool speed = optimize_function_for_speed_p (cfun);
2113 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2115 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2116 return (set_src_cost (tem, mode, speed)
2117 <= set_src_cost (orig, mode, speed) ? tem : 0);
2121 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
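/* E.g. in QImode, with C1 = 0x40 and C2 = 0x80 (the sign bit),
   (plus (xor x C1) C2) becomes (xor x 0xc0): within the mode,
   adding the sign bit merely toggles it, so the PLUS is equivalent
   to another XOR. */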
2122 if (CONST_SCALAR_INT_P (op1)
2123 && GET_CODE (op0) == XOR
2124 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2125 && mode_signbit_p (mode, op1))
2126 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2127 simplify_gen_binary (XOR, mode, op1,
2128 XEXP (op0, 1)));
2130 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2131 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2132 && GET_CODE (op0) == MULT
2133 && GET_CODE (XEXP (op0, 0)) == NEG)
2135 rtx in1, in2;
2137 in1 = XEXP (XEXP (op0, 0), 0);
2138 in2 = XEXP (op0, 1);
2139 return simplify_gen_binary (MINUS, mode, op1,
2140 simplify_gen_binary (MULT, mode,
2141 in1, in2));
2144 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2145 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2146 is 1. */
2147 if (COMPARISON_P (op0)
2148 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2149 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2150 && (reversed = reversed_comparison (op0, mode)))
2151 return
2152 simplify_gen_unary (NEG, mode, reversed, mode);
2154 /* If one of the operands is a PLUS or a MINUS, see if we can
2155 simplify this by the associative law.
2156 Don't use the associative law for floating point.
2157 The inaccuracy makes it nonassociative,
2158 and subtle programs can break if operations are associated. */
2160 if (INTEGRAL_MODE_P (mode)
2161 && (plus_minus_operand_p (op0)
2162 || plus_minus_operand_p (op1))
2163 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2164 return tem;
2166 /* Reassociate floating point addition only when the user
2167 specifies associative math operations. */
2168 if (FLOAT_MODE_P (mode)
2169 && flag_associative_math)
2171 tem = simplify_associative_operation (code, mode, op0, op1);
2172 if (tem)
2173 return tem;
2175 break;
2177 case COMPARE:
2178 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2179 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2180 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2181 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2183 rtx xop00 = XEXP (op0, 0);
2184 rtx xop10 = XEXP (op1, 0);
2186 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2187 return xop00;
2189 if (REG_P (xop00) && REG_P (xop10)
2190 && GET_MODE (xop00) == GET_MODE (xop10)
2191 && REGNO (xop00) == REGNO (xop10)
2192 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2193 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2194 return xop00;
2196 break;
2198 case MINUS:
2199 /* We can't assume x-x is 0 even with non-IEEE floating point,
2200 but since it is zero except in very strange circumstances, we
2201 will treat it as zero with -ffinite-math-only. */
2202 if (rtx_equal_p (trueop0, trueop1)
2203 && ! side_effects_p (op0)
2204 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2205 return CONST0_RTX (mode);
2207 /* Change subtraction from zero into negation. (0 - x) is the
2208 same as -x when x is NaN, infinite, or finite and nonzero.
2209 But if the mode has signed zeros, and does not round towards
2210 -infinity, then 0 - 0 is 0, not -0. */
2211 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2212 return simplify_gen_unary (NEG, mode, op1, mode);
2214 /* (-1 - a) is ~a. */
2215 if (trueop0 == constm1_rtx)
2216 return simplify_gen_unary (NOT, mode, op1, mode);
2218 /* Subtracting 0 has no effect unless the mode has signed zeros
2219 and supports rounding towards -infinity. In such a case,
2220 0 - 0 is -0. */
2221 if (!(HONOR_SIGNED_ZEROS (mode)
2222 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2223 && trueop1 == CONST0_RTX (mode))
2224 return op0;
2226 /* See if this is something like X * C - X or vice versa or
2227 if the multiplication is written as a shift. If so, we can
2228 distribute and make a new multiply, shift, or maybe just
2229 have X (if C is 2 in the example above). But don't make
2230 something more expensive than we had before. */
2232 if (SCALAR_INT_MODE_P (mode))
2234 rtx lhs = op0, rhs = op1;
2236 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2237 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2239 if (GET_CODE (lhs) == NEG)
2241 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2242 lhs = XEXP (lhs, 0);
2244 else if (GET_CODE (lhs) == MULT
2245 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2247 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2248 lhs = XEXP (lhs, 0);
2250 else if (GET_CODE (lhs) == ASHIFT
2251 && CONST_INT_P (XEXP (lhs, 1))
2252 && INTVAL (XEXP (lhs, 1)) >= 0
2253 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2255 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2256 GET_MODE_PRECISION (mode));
2257 lhs = XEXP (lhs, 0);
2260 if (GET_CODE (rhs) == NEG)
2262 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2263 rhs = XEXP (rhs, 0);
2265 else if (GET_CODE (rhs) == MULT
2266 && CONST_INT_P (XEXP (rhs, 1)))
2268 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2269 rhs = XEXP (rhs, 0);
2271 else if (GET_CODE (rhs) == ASHIFT
2272 && CONST_INT_P (XEXP (rhs, 1))
2273 && INTVAL (XEXP (rhs, 1)) >= 0
2274 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2276 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2277 GET_MODE_PRECISION (mode));
2278 negcoeff1 = -negcoeff1;
2279 rhs = XEXP (rhs, 0);
2282 if (rtx_equal_p (lhs, rhs))
2284 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2285 rtx coeff;
2286 bool speed = optimize_function_for_speed_p (cfun);
2288 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2290 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2291 return (set_src_cost (tem, mode, speed)
2292 <= set_src_cost (orig, mode, speed) ? tem : 0);
2296 /* (a - (-b)) -> (a + b). True even for IEEE. */
2297 if (GET_CODE (op1) == NEG)
2298 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2300 /* (-x - c) may be simplified as (-c - x). */
2301 if (GET_CODE (op0) == NEG
2302 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2304 tem = simplify_unary_operation (NEG, mode, op1, mode);
2305 if (tem)
2306 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2309 /* Don't let a relocatable value get a negative coeff. */
2310 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2311 return simplify_gen_binary (PLUS, mode,
2312 op0,
2313 neg_const_int (mode, op1));
2315 /* (x - (x & y)) -> (x & ~y) */
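/* The subtraction cannot borrow, because every bit set in (x & y)
   is also set in x; e.g. x = 0b1100, y = 0b1010 gives
   0b1100 - 0b1000 = 0b0100, which equals 0b1100 & 0b0101. */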
2316 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2318 if (rtx_equal_p (op0, XEXP (op1, 0)))
2320 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2321 GET_MODE (XEXP (op1, 1)));
2322 return simplify_gen_binary (AND, mode, op0, tem);
2324 if (rtx_equal_p (op0, XEXP (op1, 1)))
2326 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2327 GET_MODE (XEXP (op1, 0)));
2328 return simplify_gen_binary (AND, mode, op0, tem);
2332 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2333 by reversing the comparison code if valid. */
2334 if (STORE_FLAG_VALUE == 1
2335 && trueop0 == const1_rtx
2336 && COMPARISON_P (op1)
2337 && (reversed = reversed_comparison (op1, mode)))
2338 return reversed;
2340 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2341 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2342 && GET_CODE (op1) == MULT
2343 && GET_CODE (XEXP (op1, 0)) == NEG)
2345 rtx in1, in2;
2347 in1 = XEXP (XEXP (op1, 0), 0);
2348 in2 = XEXP (op1, 1);
2349 return simplify_gen_binary (PLUS, mode,
2350 simplify_gen_binary (MULT, mode,
2351 in1, in2),
2352 op0);
2355 /* Canonicalize (minus (neg A) (mult B C)) to
2356 (minus (mult (neg B) C) A). */
2357 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2358 && GET_CODE (op1) == MULT
2359 && GET_CODE (op0) == NEG)
2361 rtx in1, in2;
2363 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2364 in2 = XEXP (op1, 1);
2365 return simplify_gen_binary (MINUS, mode,
2366 simplify_gen_binary (MULT, mode,
2367 in1, in2),
2368 XEXP (op0, 0));
2371 /* If one of the operands is a PLUS or a MINUS, see if we can
2372 simplify this by the associative law. This will, for example,
2373 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2374 Don't use the associative law for floating point.
2375 The inaccuracy makes it nonassociative,
2376 and subtle programs can break if operations are associated. */
2378 if (INTEGRAL_MODE_P (mode)
2379 && (plus_minus_operand_p (op0)
2380 || plus_minus_operand_p (op1))
2381 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2382 return tem;
2383 break;
2385 case MULT:
2386 if (trueop1 == constm1_rtx)
2387 return simplify_gen_unary (NEG, mode, op0, mode);
2389 if (GET_CODE (op0) == NEG)
2391 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2392 /* If op1 is a MULT as well and simplify_unary_operation
2393 just moved the NEG to the second operand, simplify_gen_binary
2394 below could, through simplify_associative_operation, move
2395 the NEG around again and recurse endlessly. */
2396 if (temp
2397 && GET_CODE (op1) == MULT
2398 && GET_CODE (temp) == MULT
2399 && XEXP (op1, 0) == XEXP (temp, 0)
2400 && GET_CODE (XEXP (temp, 1)) == NEG
2401 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2402 temp = NULL_RTX;
2403 if (temp)
2404 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2406 if (GET_CODE (op1) == NEG)
2408 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2409 /* If op0 is a MULT as well and simplify_unary_operation
2410 just moved the NEG to the second operand, simplify_gen_binary
2411 below could, through simplify_associative_operation, move
2412 the NEG around again and recurse endlessly. */
2413 if (temp
2414 && GET_CODE (op0) == MULT
2415 && GET_CODE (temp) == MULT
2416 && XEXP (op0, 0) == XEXP (temp, 0)
2417 && GET_CODE (XEXP (temp, 1)) == NEG
2418 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2419 temp = NULL_RTX;
2420 if (temp)
2421 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2424 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2425 x is NaN, since x * 0 is then also NaN. Nor is it valid
2426 when the mode has signed zeros, since multiplying a negative
2427 number by 0 will give -0, not 0. */
2428 if (!HONOR_NANS (mode)
2429 && !HONOR_SIGNED_ZEROS (mode)
2430 && trueop1 == CONST0_RTX (mode)
2431 && ! side_effects_p (op0))
2432 return op1;
2434 /* In IEEE floating point, x*1 is not equivalent to x for
2435 signalling NaNs. */
2436 if (!HONOR_SNANS (mode)
2437 && trueop1 == CONST1_RTX (mode))
2438 return op0;
2440 /* Convert multiply by constant power of two into shift. */
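/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)). */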
2441 if (CONST_SCALAR_INT_P (trueop1))
2443 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2444 if (val >= 0)
2445 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2448 /* x*2 is x+x and x*(-1) is -x */
2449 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2450 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2451 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2452 && GET_MODE (op0) == mode)
2454 REAL_VALUE_TYPE d;
2455 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2457 if (REAL_VALUES_EQUAL (d, dconst2))
2458 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2460 if (!HONOR_SNANS (mode)
2461 && REAL_VALUES_EQUAL (d, dconstm1))
2462 return simplify_gen_unary (NEG, mode, op0, mode);
2465 /* Optimize -x * -x as x * x. */
2466 if (FLOAT_MODE_P (mode)
2467 && GET_CODE (op0) == NEG
2468 && GET_CODE (op1) == NEG
2469 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2470 && !side_effects_p (XEXP (op0, 0)))
2471 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2473 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2474 if (SCALAR_FLOAT_MODE_P (mode)
2475 && GET_CODE (op0) == ABS
2476 && GET_CODE (op1) == ABS
2477 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2478 && !side_effects_p (XEXP (op0, 0)))
2479 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2481 /* Reassociate multiplication, but for floating point MULTs
2482 only when the user specifies unsafe math optimizations. */
2483 if (! FLOAT_MODE_P (mode)
2484 || flag_unsafe_math_optimizations)
2486 tem = simplify_associative_operation (code, mode, op0, op1);
2487 if (tem)
2488 return tem;
2490 break;
2492 case IOR:
2493 if (trueop1 == CONST0_RTX (mode))
2494 return op0;
2495 if (INTEGRAL_MODE_P (mode)
2496 && trueop1 == CONSTM1_RTX (mode)
2497 && !side_effects_p (op0))
2498 return op1;
2499 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2500 return op0;
2501 /* A | (~A) -> -1 */
2502 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2503 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2504 && ! side_effects_p (op0)
2505 && SCALAR_INT_MODE_P (mode))
2506 return constm1_rtx;
2508 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2509 if (CONST_INT_P (op1)
2510 && HWI_COMPUTABLE_MODE_P (mode)
2511 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2512 && !side_effects_p (op0))
2513 return op1;
2515 /* Canonicalize (X & C1) | C2. */
2516 if (GET_CODE (op0) == AND
2517 && CONST_INT_P (trueop1)
2518 && CONST_INT_P (XEXP (op0, 1)))
2520 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2521 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2522 HOST_WIDE_INT c2 = INTVAL (trueop1);
2524 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2525 if ((c1 & c2) == c1
2526 && !side_effects_p (XEXP (op0, 0)))
2527 return trueop1;
2529 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2530 if (((c1|c2) & mask) == mask)
2531 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2533 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2534 if (((c1 & ~c2) & mask) != (c1 & mask))
2536 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2537 gen_int_mode (c1 & ~c2, mode));
2538 return simplify_gen_binary (IOR, mode, tem, op1);
2542 /* Convert (A & B) | A to A. */
2543 if (GET_CODE (op0) == AND
2544 && (rtx_equal_p (XEXP (op0, 0), op1)
2545 || rtx_equal_p (XEXP (op0, 1), op1))
2546 && ! side_effects_p (XEXP (op0, 0))
2547 && ! side_effects_p (XEXP (op0, 1)))
2548 return op1;
2550 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2551 mode size to (rotate A CX). */
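/* E.g. in SImode, (ior (ashift a (const_int 8))
   (lshiftrt a (const_int 24))) becomes (rotate a (const_int 8)). */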
2553 if (GET_CODE (op1) == ASHIFT
2554 || GET_CODE (op1) == SUBREG)
2556 opleft = op1;
2557 opright = op0;
2559 else
2561 opright = op1;
2562 opleft = op0;
2565 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2566 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2567 && CONST_INT_P (XEXP (opleft, 1))
2568 && CONST_INT_P (XEXP (opright, 1))
2569 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2570 == GET_MODE_PRECISION (mode)))
2571 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2573 /* Same, but for ashift that has been "simplified" to a wider mode
2574 by simplify_shift_const. */
2576 if (GET_CODE (opleft) == SUBREG
2577 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2578 && GET_CODE (opright) == LSHIFTRT
2579 && GET_CODE (XEXP (opright, 0)) == SUBREG
2580 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2581 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2582 && (GET_MODE_SIZE (GET_MODE (opleft))
2583 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2584 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2585 SUBREG_REG (XEXP (opright, 0)))
2586 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2587 && CONST_INT_P (XEXP (opright, 1))
2588 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2589 == GET_MODE_PRECISION (mode)))
2590 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2591 XEXP (SUBREG_REG (opleft), 1));
2593 /* If we have (ior (and X C1) C2), simplify this by making
2594 C1 as small as possible if C1 actually changes. */
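/* E.g. (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
   (ior (and x (const_int 0xf0)) (const_int 0x0f)): the low four
   bits of the AND mask are redundant because the IOR forces those
   bits to 1 anyway. */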
2595 if (CONST_INT_P (op1)
2596 && (HWI_COMPUTABLE_MODE_P (mode)
2597 || INTVAL (op1) > 0)
2598 && GET_CODE (op0) == AND
2599 && CONST_INT_P (XEXP (op0, 1))
2600 && CONST_INT_P (op1)
2601 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2603 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2604 gen_int_mode (UINTVAL (XEXP (op0, 1))
2605 & ~UINTVAL (op1),
2606 mode));
2607 return simplify_gen_binary (IOR, mode, tmp, op1);
2610 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2611 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2612 the PLUS does not affect any of the bits in OP1, we can do
2613 the IOR as a PLUS and we can associate. This is valid if OP1
2614 can be safely shifted left C bits. */
2615 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2616 && GET_CODE (XEXP (op0, 0)) == PLUS
2617 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2618 && CONST_INT_P (XEXP (op0, 1))
2619 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2621 int count = INTVAL (XEXP (op0, 1));
2622 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2624 if (mask >> count == INTVAL (trueop1)
2625 && trunc_int_for_mode (mask, mode) == mask
2626 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2627 return simplify_gen_binary (ASHIFTRT, mode,
2628 plus_constant (mode, XEXP (op0, 0),
2629 mask),
2630 XEXP (op0, 1));
2633 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2634 if (tem)
2635 return tem;
2637 tem = simplify_associative_operation (code, mode, op0, op1);
2638 if (tem)
2639 return tem;
2640 break;
2642 case XOR:
2643 if (trueop1 == CONST0_RTX (mode))
2644 return op0;
2645 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2646 return simplify_gen_unary (NOT, mode, op0, mode);
2647 if (rtx_equal_p (trueop0, trueop1)
2648 && ! side_effects_p (op0)
2649 && GET_MODE_CLASS (mode) != MODE_CC)
2650 return CONST0_RTX (mode);
2652 /* Canonicalize XOR of the most significant bit to PLUS. */
2653 if (CONST_SCALAR_INT_P (op1)
2654 && mode_signbit_p (mode, op1))
2655 return simplify_gen_binary (PLUS, mode, op0, op1);
2656 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2657 if (CONST_SCALAR_INT_P (op1)
2658 && GET_CODE (op0) == PLUS
2659 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2660 && mode_signbit_p (mode, XEXP (op0, 1)))
2661 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2662 simplify_gen_binary (XOR, mode, op1,
2663 XEXP (op0, 1)));
2665 /* If we are XORing two things that have no bits in common,
2666 convert them into an IOR. This helps to detect rotation encoded
2667 using those methods and possibly other simplifications. */
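/* E.g. in SImode, (xor (and x (const_int 0xf0)) (and y (const_int 0x0f)))
   has no nonzero bits in common between its operands, so it is
   rewritten as the corresponding IOR. */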
2669 if (HWI_COMPUTABLE_MODE_P (mode)
2670 && (nonzero_bits (op0, mode)
2671 & nonzero_bits (op1, mode)) == 0)
2672 return (simplify_gen_binary (IOR, mode, op0, op1));
2674 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2675 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2676 (NOT y). */
2678 int num_negated = 0;
2680 if (GET_CODE (op0) == NOT)
2681 num_negated++, op0 = XEXP (op0, 0);
2682 if (GET_CODE (op1) == NOT)
2683 num_negated++, op1 = XEXP (op1, 0);
2685 if (num_negated == 2)
2686 return simplify_gen_binary (XOR, mode, op0, op1);
2687 else if (num_negated == 1)
2688 return simplify_gen_unary (NOT, mode,
2689 simplify_gen_binary (XOR, mode, op0, op1),
2690 mode);
2693 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2694 correspond to a machine insn or result in further simplifications
2695 if B is a constant. */
2697 if (GET_CODE (op0) == AND
2698 && rtx_equal_p (XEXP (op0, 1), op1)
2699 && ! side_effects_p (op1))
2700 return simplify_gen_binary (AND, mode,
2701 simplify_gen_unary (NOT, mode,
2702 XEXP (op0, 0), mode),
2703 op1);
2705 else if (GET_CODE (op0) == AND
2706 && rtx_equal_p (XEXP (op0, 0), op1)
2707 && ! side_effects_p (op1))
2708 return simplify_gen_binary (AND, mode,
2709 simplify_gen_unary (NOT, mode,
2710 XEXP (op0, 1), mode),
2711 op1);
2713 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2714 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2715 out bits inverted twice and not set by C. Similarly, given
2716 (xor (and (xor A B) C) D), simplify without inverting C in
2717 the xor operand: (xor (and A C) (B&C)^D). */
2719 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2720 && GET_CODE (XEXP (op0, 0)) == XOR
2721 && CONST_INT_P (op1)
2722 && CONST_INT_P (XEXP (op0, 1))
2723 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2725 enum rtx_code op = GET_CODE (op0);
2726 rtx a = XEXP (XEXP (op0, 0), 0);
2727 rtx b = XEXP (XEXP (op0, 0), 1);
2728 rtx c = XEXP (op0, 1);
2729 rtx d = op1;
2730 HOST_WIDE_INT bval = INTVAL (b);
2731 HOST_WIDE_INT cval = INTVAL (c);
2732 HOST_WIDE_INT dval = INTVAL (d);
2733 HOST_WIDE_INT xcval;
2735 if (op == IOR)
2736 xcval = ~cval;
2737 else
2738 xcval = cval;
2740 return simplify_gen_binary (XOR, mode,
2741 simplify_gen_binary (op, mode, a, c),
2742 gen_int_mode ((bval & xcval) ^ dval,
2743 mode));
2746 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2747 we can transform like this:
2748 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2749 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2750 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2751 Attempt a few simplifications when B and C are both constants. */
2752 if (GET_CODE (op0) == AND
2753 && CONST_INT_P (op1)
2754 && CONST_INT_P (XEXP (op0, 1)))
2756 rtx a = XEXP (op0, 0);
2757 rtx b = XEXP (op0, 1);
2758 rtx c = op1;
2759 HOST_WIDE_INT bval = INTVAL (b);
2760 HOST_WIDE_INT cval = INTVAL (c);
2762 /* Instead of computing ~A&C directly, we compute its complement,
2763 A|~C. If that yields -1, ~A&C is zero, so we can
2764 optimize for sure. If it does not simplify, we still try
2765 to compute ~A&C below, but since that always allocates
2766 RTL, we don't try that before committing to returning a
2767 simplified expression. */
2768 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2769 GEN_INT (~cval));
2771 if ((~cval & bval) == 0)
2773 rtx na_c = NULL_RTX;
2774 if (n_na_c)
2775 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2776 else
2778 /* If ~A does not simplify, don't bother: we don't
2779 want to simplify 2 operations into 3, and if na_c
2780 were to simplify with na, n_na_c would have
2781 simplified as well. */
2782 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2783 if (na)
2784 na_c = simplify_gen_binary (AND, mode, na, c);
2787 /* Try to simplify ~A&C | ~B&C. */
2788 if (na_c != NULL_RTX)
2789 return simplify_gen_binary (IOR, mode, na_c,
2790 gen_int_mode (~bval & cval, mode));
2792 else
2794 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2795 if (n_na_c == CONSTM1_RTX (mode))
2797 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2798 gen_int_mode (~cval & bval,
2799 mode));
2800 return simplify_gen_binary (IOR, mode, a_nc_b,
2801 gen_int_mode (~bval & cval,
2802 mode));
2807 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2808 comparison if STORE_FLAG_VALUE is 1. */
2809 if (STORE_FLAG_VALUE == 1
2810 && trueop1 == const1_rtx
2811 && COMPARISON_P (op0)
2812 && (reversed = reversed_comparison (op0, mode)))
2813 return reversed;
2815 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2816 is (lt foo (const_int 0)), so we can perform the above
2817 simplification if STORE_FLAG_VALUE is 1. */
2819 if (STORE_FLAG_VALUE == 1
2820 && trueop1 == const1_rtx
2821 && GET_CODE (op0) == LSHIFTRT
2822 && CONST_INT_P (XEXP (op0, 1))
2823 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2824 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2826 /* (xor (comparison foo bar) (const_int sign-bit))
2827 when STORE_FLAG_VALUE is the sign bit. */
2828 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2829 && trueop1 == const_true_rtx
2830 && COMPARISON_P (op0)
2831 && (reversed = reversed_comparison (op0, mode)))
2832 return reversed;
2834 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2835 if (tem)
2836 return tem;
2838 tem = simplify_associative_operation (code, mode, op0, op1);
2839 if (tem)
2840 return tem;
2841 break;
2843 case AND:
2844 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2845 return trueop1;
2846 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2847 return op0;
2848 if (HWI_COMPUTABLE_MODE_P (mode))
2850 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2851 HOST_WIDE_INT nzop1;
2852 if (CONST_INT_P (trueop1))
2854 HOST_WIDE_INT val1 = INTVAL (trueop1);
2855 /* If we are turning off bits already known off in OP0, we need
2856 not do an AND. */
2857 if ((nzop0 & ~val1) == 0)
2858 return op0;
2860 nzop1 = nonzero_bits (trueop1, mode);
2861 /* If we are clearing all the nonzero bits, the result is zero. */
2862 if ((nzop1 & nzop0) == 0
2863 && !side_effects_p (op0) && !side_effects_p (op1))
2864 return CONST0_RTX (mode);
2866 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2867 && GET_MODE_CLASS (mode) != MODE_CC)
2868 return op0;
2869 /* A & (~A) -> 0 */
2870 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2871 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2872 && ! side_effects_p (op0)
2873 && GET_MODE_CLASS (mode) != MODE_CC)
2874 return CONST0_RTX (mode);
2876 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2877 there are no nonzero bits of C outside of X's mode. */
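/* E.g. if X has QImode, (and:SI (sign_extend:SI X) (const_int 0x7f))
   becomes (zero_extend:SI (and:QI X (const_int 0x7f))), since 0x7f
   has no bits outside the QImode mask. */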
2878 if ((GET_CODE (op0) == SIGN_EXTEND
2879 || GET_CODE (op0) == ZERO_EXTEND)
2880 && CONST_INT_P (trueop1)
2881 && HWI_COMPUTABLE_MODE_P (mode)
2882 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2883 & UINTVAL (trueop1)) == 0)
2885 machine_mode imode = GET_MODE (XEXP (op0, 0));
2886 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2887 gen_int_mode (INTVAL (trueop1),
2888 imode));
2889 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2892 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2893 we might be able to further simplify the AND with X and potentially
2894 remove the truncation altogether. */
2895 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2897 rtx x = XEXP (op0, 0);
2898 machine_mode xmode = GET_MODE (x);
2899 tem = simplify_gen_binary (AND, xmode, x,
2900 gen_int_mode (INTVAL (trueop1), xmode));
2901 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2904 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2905 if (GET_CODE (op0) == IOR
2906 && CONST_INT_P (trueop1)
2907 && CONST_INT_P (XEXP (op0, 1)))
2909 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2910 return simplify_gen_binary (IOR, mode,
2911 simplify_gen_binary (AND, mode,
2912 XEXP (op0, 0), op1),
2913 gen_int_mode (tmp, mode));
2916 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2917 insn (and may simplify more). */
2918 if (GET_CODE (op0) == XOR
2919 && rtx_equal_p (XEXP (op0, 0), op1)
2920 && ! side_effects_p (op1))
2921 return simplify_gen_binary (AND, mode,
2922 simplify_gen_unary (NOT, mode,
2923 XEXP (op0, 1), mode),
2924 op1);
2926 if (GET_CODE (op0) == XOR
2927 && rtx_equal_p (XEXP (op0, 1), op1)
2928 && ! side_effects_p (op1))
2929 return simplify_gen_binary (AND, mode,
2930 simplify_gen_unary (NOT, mode,
2931 XEXP (op0, 0), mode),
2932 op1);
2934 /* Similarly for (~(A ^ B)) & A. */
2935 if (GET_CODE (op0) == NOT
2936 && GET_CODE (XEXP (op0, 0)) == XOR
2937 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2941 if (GET_CODE (op0) == NOT
2942 && GET_CODE (XEXP (op0, 0)) == XOR
2943 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2944 && ! side_effects_p (op1))
2945 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2947 /* Convert (A | B) & A to A. */
2948 if (GET_CODE (op0) == IOR
2949 && (rtx_equal_p (XEXP (op0, 0), op1)
2950 || rtx_equal_p (XEXP (op0, 1), op1))
2951 && ! side_effects_p (XEXP (op0, 0))
2952 && ! side_effects_p (XEXP (op0, 1)))
2953 return op1;
2955 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2956 ((A & N) + B) & M -> (A + B) & M
2957 Similarly if (N & M) == 0,
2958 ((A | N) + B) & M -> (A + B) & M
2959 and for - instead of + and/or ^ instead of |.
2960 Also, if (N & M) == 0, then
2961 (A +- N) & M -> A & M. */
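/* For instance, with M = 0xff and N = 0xffff,
   (and (plus (and a (const_int 0xffff)) b) (const_int 0xff)) becomes
   (and (plus a b) (const_int 0xff)): the bits of A that the inner
   AND removes can only influence bits of the sum that the outer
   AND discards anyway. */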
2962 if (CONST_INT_P (trueop1)
2963 && HWI_COMPUTABLE_MODE_P (mode)
2964 && ~UINTVAL (trueop1)
2965 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2966 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2968 rtx pmop[2];
2969 int which;
2971 pmop[0] = XEXP (op0, 0);
2972 pmop[1] = XEXP (op0, 1);
2974 if (CONST_INT_P (pmop[1])
2975 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2976 return simplify_gen_binary (AND, mode, pmop[0], op1);
2978 for (which = 0; which < 2; which++)
2980 tem = pmop[which];
2981 switch (GET_CODE (tem))
2983 case AND:
2984 if (CONST_INT_P (XEXP (tem, 1))
2985 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2986 == UINTVAL (trueop1))
2987 pmop[which] = XEXP (tem, 0);
2988 break;
2989 case IOR:
2990 case XOR:
2991 if (CONST_INT_P (XEXP (tem, 1))
2992 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2993 pmop[which] = XEXP (tem, 0);
2994 break;
2995 default:
2996 break;
3000 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3002 tem = simplify_gen_binary (GET_CODE (op0), mode,
3003 pmop[0], pmop[1]);
3004 return simplify_gen_binary (code, mode, tem, op1);
3008 /* (and X (ior (not X) Y)) -> (and X Y) */
3009 if (GET_CODE (op1) == IOR
3010 && GET_CODE (XEXP (op1, 0)) == NOT
3011 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3012 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3014 /* (and (ior (not X) Y) X) -> (and X Y) */
3015 if (GET_CODE (op0) == IOR
3016 && GET_CODE (XEXP (op0, 0)) == NOT
3017 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3018 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3020 /* (and X (ior Y (not X))) -> (and X Y) */
3021 if (GET_CODE (op1) == IOR
3022 && GET_CODE (XEXP (op1, 1)) == NOT
3023 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3024 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3026 /* (and (ior Y (not X)) X) -> (and X Y) */
3027 if (GET_CODE (op0) == IOR
3028 && GET_CODE (XEXP (op0, 1)) == NOT
3029 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3030 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3032 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3033 if (tem)
3034 return tem;
3036 tem = simplify_associative_operation (code, mode, op0, op1);
3037 if (tem)
3038 return tem;
3039 break;
3041 case UDIV:
3042 /* 0/x is 0 (or x&0 if x has side-effects). */
3043 if (trueop0 == CONST0_RTX (mode))
3045 if (side_effects_p (op1))
3046 return simplify_gen_binary (AND, mode, op1, trueop0);
3047 return trueop0;
3049 /* x/1 is x. */
3050 if (trueop1 == CONST1_RTX (mode))
3052 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3053 if (tem)
3054 return tem;
3056 /* Convert divide by power of two into shift. */
3057 if (CONST_INT_P (trueop1)
3058 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3059 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3060 break;
3062 case DIV:
3063 /* Handle floating point and integers separately. */
3064 if (SCALAR_FLOAT_MODE_P (mode))
3066 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3067 safe for modes with NaNs, since 0.0 / 0.0 will then be
3068 NaN rather than 0.0. Nor is it safe for modes with signed
3069 zeros, since dividing 0 by a negative number gives -0.0. */
3070 if (trueop0 == CONST0_RTX (mode)
3071 && !HONOR_NANS (mode)
3072 && !HONOR_SIGNED_ZEROS (mode)
3073 && ! side_effects_p (op1))
3074 return op0;
3075 /* x/1.0 is x. */
3076 if (trueop1 == CONST1_RTX (mode)
3077 && !HONOR_SNANS (mode))
3078 return op0;
3080 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3081 && trueop1 != CONST0_RTX (mode))
3083 REAL_VALUE_TYPE d;
3084 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3086 /* x/-1.0 is -x. */
3087 if (REAL_VALUES_EQUAL (d, dconstm1)
3088 && !HONOR_SNANS (mode))
3089 return simplify_gen_unary (NEG, mode, op0, mode);
3091 /* Change FP division by a constant into multiplication.
3092 Only do this with -freciprocal-math. */
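/* E.g. with -freciprocal-math, x / 4.0 becomes x * 0.25. */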
3093 if (flag_reciprocal_math
3094 && !REAL_VALUES_EQUAL (d, dconst0))
3096 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3097 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3098 return simplify_gen_binary (MULT, mode, op0, tem);
3102 else if (SCALAR_INT_MODE_P (mode))
3104 /* 0/x is 0 (or x&0 if x has side-effects). */
3105 if (trueop0 == CONST0_RTX (mode)
3106 && !cfun->can_throw_non_call_exceptions)
3108 if (side_effects_p (op1))
3109 return simplify_gen_binary (AND, mode, op1, trueop0);
3110 return trueop0;
3112 /* x/1 is x. */
3113 if (trueop1 == CONST1_RTX (mode))
3115 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3116 if (tem)
3117 return tem;
3119 /* x/-1 is -x. */
3120 if (trueop1 == constm1_rtx)
3122 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3123 if (x)
3124 return simplify_gen_unary (NEG, mode, x, mode);
3127 break;
3129 case UMOD:
3130 /* 0%x is 0 (or x&0 if x has side-effects). */
3131 if (trueop0 == CONST0_RTX (mode))
3133 if (side_effects_p (op1))
3134 return simplify_gen_binary (AND, mode, op1, trueop0);
3135 return trueop0;
3137 /* x%1 is 0 (or x&0 if x has side-effects). */
3138 if (trueop1 == CONST1_RTX (mode))
3140 if (side_effects_p (op0))
3141 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3142 return CONST0_RTX (mode);
3144 /* Implement modulus by power of two as AND. */
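/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)). */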
3145 if (CONST_INT_P (trueop1)
3146 && exact_log2 (UINTVAL (trueop1)) > 0)
3147 return simplify_gen_binary (AND, mode, op0,
3148 gen_int_mode (INTVAL (op1) - 1, mode));
3149 break;
3151 case MOD:
3152 /* 0%x is 0 (or x&0 if x has side-effects). */
3153 if (trueop0 == CONST0_RTX (mode))
3155 if (side_effects_p (op1))
3156 return simplify_gen_binary (AND, mode, op1, trueop0);
3157 return trueop0;
3159 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3160 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3162 if (side_effects_p (op0))
3163 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3164 return CONST0_RTX (mode);
3166 break;
3168 case ROTATERT:
3169 case ROTATE:
3170 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3171 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3172 bitsize - 1, use the other rotate direction with an amount of
3173 1 .. bitsize / 2 - 1 instead. */
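/* E.g. in SImode, (rotate x (const_int 24)) is canonicalized to
   (rotatert x (const_int 8)) when the target has both rotate
   directions available. */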
3174 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3175 if (CONST_INT_P (trueop1)
3176 && IN_RANGE (INTVAL (trueop1),
3177 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3178 GET_MODE_PRECISION (mode) - 1))
3179 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3180 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3181 - INTVAL (trueop1)));
3182 #endif
3183 /* FALLTHRU */
3184 case ASHIFTRT:
3185 if (trueop1 == CONST0_RTX (mode))
3186 return op0;
3187 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3188 return op0;
3189 /* Rotating ~0 always results in ~0. */
3190 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3191 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3192 && ! side_effects_p (op1))
3193 return op0;
3194 /* Given:
3195 scalar modes M1, M2
3196 scalar constants c1, c2
3197 size (M2) > size (M1)
3198 c1 == size (M2) - size (M1)
3199 optimize:
3200 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3201 <low_part>)
3202 (const_int <c2>))
3204 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3205 <low_part>). */
3206 if (code == ASHIFTRT
3207 && !VECTOR_MODE_P (mode)
3208 && SUBREG_P (op0)
3209 && CONST_INT_P (op1)
3210 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3211 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3212 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3213 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3214 > GET_MODE_BITSIZE (mode))
3215 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3216 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3217 - GET_MODE_BITSIZE (mode)))
3218 && subreg_lowpart_p (op0))
3220 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3221 + INTVAL (op1));
3222 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3223 tmp = simplify_gen_binary (ASHIFTRT,
3224 GET_MODE (SUBREG_REG (op0)),
3225 XEXP (SUBREG_REG (op0), 0),
3226 tmp);
3227 return lowpart_subreg (mode, tmp, inner_mode);
3229 canonicalize_shift:
3230 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3232 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3233 if (val != INTVAL (op1))
3234 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3236 break;
3238 case ASHIFT:
3239 case SS_ASHIFT:
3240 case US_ASHIFT:
3241 if (trueop1 == CONST0_RTX (mode))
3242 return op0;
3243 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3244 return op0;
3245 goto canonicalize_shift;
3247 case LSHIFTRT:
3248 if (trueop1 == CONST0_RTX (mode))
3249 return op0;
3250 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3251 return op0;
3252 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
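/* For example, in SImode with CLZ defined as 32 at zero, (clz x) is
   at most 31 for nonzero x and exactly 32 for x == 0, so
   (lshiftrt (clz x) (const_int 5)) yields 1 exactly when x is zero,
   matching (eq x 0) under STORE_FLAG_VALUE == 1. */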
3253 if (GET_CODE (op0) == CLZ
3254 && CONST_INT_P (trueop1)
3255 && STORE_FLAG_VALUE == 1
3256 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3258 machine_mode imode = GET_MODE (XEXP (op0, 0));
3259 unsigned HOST_WIDE_INT zero_val = 0;
3261 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3262 && zero_val == GET_MODE_PRECISION (imode)
3263 && INTVAL (trueop1) == exact_log2 (zero_val))
3264 return simplify_gen_relational (EQ, mode, imode,
3265 XEXP (op0, 0), const0_rtx);
3267 goto canonicalize_shift;
3269 case SMIN:
3270 if (width <= HOST_BITS_PER_WIDE_INT
3271 && mode_signbit_p (mode, trueop1)
3272 && ! side_effects_p (op0))
3273 return op1;
3274 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3275 return op0;
3276 tem = simplify_associative_operation (code, mode, op0, op1);
3277 if (tem)
3278 return tem;
3279 break;
3281 case SMAX:
3282 if (width <= HOST_BITS_PER_WIDE_INT
3283 && CONST_INT_P (trueop1)
3284 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3285 && ! side_effects_p (op0))
3286 return op1;
3287 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3288 return op0;
3289 tem = simplify_associative_operation (code, mode, op0, op1);
3290 if (tem)
3291 return tem;
3292 break;
3294 case UMIN:
3295 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3296 return op1;
3297 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3298 return op0;
3299 tem = simplify_associative_operation (code, mode, op0, op1);
3300 if (tem)
3301 return tem;
3302 break;
3304 case UMAX:
3305 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3306 return op1;
3307 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3308 return op0;
3309 tem = simplify_associative_operation (code, mode, op0, op1);
3310 if (tem)
3311 return tem;
3312 break;
3314 case SS_PLUS:
3315 case US_PLUS:
3316 case SS_MINUS:
3317 case US_MINUS:
3318 case SS_MULT:
3319 case US_MULT:
3320 case SS_DIV:
3321 case US_DIV:
3322 /* ??? There are simplifications that can be done. */
3323 return 0;
3325 case VEC_SELECT:
3326 if (!VECTOR_MODE_P (mode))
3328 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3329 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3330 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3331 gcc_assert (XVECLEN (trueop1, 0) == 1);
3332 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3334 if (GET_CODE (trueop0) == CONST_VECTOR)
3335 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3336 (trueop1, 0, 0)));
3338 /* Extract a scalar element from a nested VEC_SELECT expression
3339 (with optional nested VEC_CONCAT expression). Some targets
3340 (i386) extract a scalar element from a vector using a chain of
3341 nested VEC_SELECT expressions. When the input operand is a memory
3342 operand, this operation can be simplified to a simple scalar
3343 load from an appropriately offset memory address. */
3344 if (GET_CODE (trueop0) == VEC_SELECT)
3346 rtx op0 = XEXP (trueop0, 0);
3347 rtx op1 = XEXP (trueop0, 1);
3349 machine_mode opmode = GET_MODE (op0);
3350 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3351 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3353 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3354 int elem;
3356 rtvec vec;
3357 rtx tmp_op, tmp;
3359 gcc_assert (GET_CODE (op1) == PARALLEL);
3360 gcc_assert (i < n_elts);
3363 /* Select the element pointed to by the nested selector. */
3363 elem = INTVAL (XVECEXP (op1, 0, i));
3365 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3366 if (GET_CODE (op0) == VEC_CONCAT)
3368 rtx op00 = XEXP (op0, 0);
3369 rtx op01 = XEXP (op0, 1);
3371 machine_mode mode00, mode01;
3372 int n_elts00, n_elts01;
3374 mode00 = GET_MODE (op00);
3375 mode01 = GET_MODE (op01);
3377 /* Find out the number of elements of each operand. */
3378 if (VECTOR_MODE_P (mode00))
3380 elt_size = GET_MODE_UNIT_SIZE (mode00);
3381 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3383 else
3384 n_elts00 = 1;
3386 if (VECTOR_MODE_P (mode01))
3388 elt_size = GET_MODE_UNIT_SIZE (mode01);
3389 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3391 else
3392 n_elts01 = 1;
3394 gcc_assert (n_elts == n_elts00 + n_elts01);
3396 /* Select correct operand of VEC_CONCAT
3397 and adjust selector. */
3398 if (elem < n_elts01)
3399 tmp_op = op00;
3400 else
3402 tmp_op = op01;
3403 elem -= n_elts00;
3406 else
3407 tmp_op = op0;
3409 vec = rtvec_alloc (1);
3410 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3412 tmp = gen_rtx_fmt_ee (code, mode,
3413 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3414 return tmp;
3416 if (GET_CODE (trueop0) == VEC_DUPLICATE
3417 && GET_MODE (XEXP (trueop0, 0)) == mode)
3418 return XEXP (trueop0, 0);
3420 else
3422 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3423 gcc_assert (GET_MODE_INNER (mode)
3424 == GET_MODE_INNER (GET_MODE (trueop0)));
3425 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3427 if (GET_CODE (trueop0) == CONST_VECTOR)
3429 int elt_size = GET_MODE_UNIT_SIZE (mode);
3430 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3431 rtvec v = rtvec_alloc (n_elts);
3432 unsigned int i;
3434 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3435 for (i = 0; i < n_elts; i++)
3437 rtx x = XVECEXP (trueop1, 0, i);
3439 gcc_assert (CONST_INT_P (x));
3440 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3441 INTVAL (x));
3444 return gen_rtx_CONST_VECTOR (mode, v);
3447 /* Recognize the identity. */
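/* That is, a VEC_SELECT whose selector is the sequence 0, 1, ..., n-1
   applied to an operand of the same mode simply reproduces that
   operand. */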
3448 if (GET_MODE (trueop0) == mode)
3450 bool maybe_ident = true;
3451 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3453 rtx j = XVECEXP (trueop1, 0, i);
3454 if (!CONST_INT_P (j) || INTVAL (j) != i)
3456 maybe_ident = false;
3457 break;
3460 if (maybe_ident)
3461 return trueop0;
3464 /* If we build {a,b} then permute it, build the result directly. */
3465 if (XVECLEN (trueop1, 0) == 2
3466 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3467 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3468 && GET_CODE (trueop0) == VEC_CONCAT
3469 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3470 && GET_MODE (XEXP (trueop0, 0)) == mode
3471 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3472 && GET_MODE (XEXP (trueop0, 1)) == mode)
3474 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3475 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3476 rtx subop0, subop1;
3478 gcc_assert (i0 < 4 && i1 < 4);
3479 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3480 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3482 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3485 if (XVECLEN (trueop1, 0) == 2
3486 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3487 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3488 && GET_CODE (trueop0) == VEC_CONCAT
3489 && GET_MODE (trueop0) == mode)
3491 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3492 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3493 rtx subop0, subop1;
3495 gcc_assert (i0 < 2 && i1 < 2);
3496 subop0 = XEXP (trueop0, i0);
3497 subop1 = XEXP (trueop0, i1);
3499 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3502 /* If we select one half of a vec_concat, return that. */
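/* E.g. (vec_select:V2SI (vec_concat:V4SI a b)
   (parallel [(const_int 0) (const_int 1)])) is simply a, and the
   selection (parallel [(const_int 2) (const_int 3)]) is simply b,
   where a and b have V2SI mode. */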
3503 if (GET_CODE (trueop0) == VEC_CONCAT
3504 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3506 rtx subop0 = XEXP (trueop0, 0);
3507 rtx subop1 = XEXP (trueop0, 1);
3508 machine_mode mode0 = GET_MODE (subop0);
3509 machine_mode mode1 = GET_MODE (subop1);
3510 int li = GET_MODE_UNIT_SIZE (mode0);
3511 int l0 = GET_MODE_SIZE (mode0) / li;
3512 int l1 = GET_MODE_SIZE (mode1) / li;
3513 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3514 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3516 bool success = true;
3517 for (int i = 1; i < l0; ++i)
3519 rtx j = XVECEXP (trueop1, 0, i);
3520 if (!CONST_INT_P (j) || INTVAL (j) != i)
3522 success = false;
3523 break;
3526 if (success)
3527 return subop0;
3529 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3531 bool success = true;
3532 for (int i = 1; i < l1; ++i)
3534 rtx j = XVECEXP (trueop1, 0, i);
3535 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3537 success = false;
3538 break;
3541 if (success)
3542 return subop1;
3547 if (XVECLEN (trueop1, 0) == 1
3548 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3549 && GET_CODE (trueop0) == VEC_CONCAT)
3551 rtx vec = trueop0;
3552 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3554 /* Try to find the element in the VEC_CONCAT. */
3555 while (GET_MODE (vec) != mode
3556 && GET_CODE (vec) == VEC_CONCAT)
3558 HOST_WIDE_INT vec_size;
3560 if (CONST_INT_P (XEXP (vec, 0)))
3562 /* vec_concat of two const_ints doesn't make sense with
3563 respect to modes. */
3564 if (CONST_INT_P (XEXP (vec, 1)))
3565 return 0;
3567 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3568 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3570 else
3571 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3573 if (offset < vec_size)
3574 vec = XEXP (vec, 0);
3575 else
3577 offset -= vec_size;
3578 vec = XEXP (vec, 1);
3580 vec = avoid_constant_pool_reference (vec);
3583 if (GET_MODE (vec) == mode)
3584 return vec;
3587 /* If we select elements in a vec_merge that all come from the same
3588 operand, select from that operand directly. */
3589 if (GET_CODE (op0) == VEC_MERGE)
3591 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3592 if (CONST_INT_P (trueop02))
3594 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3595 bool all_operand0 = true;
3596 bool all_operand1 = true;
3597 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3599 rtx j = XVECEXP (trueop1, 0, i);
3600 if (sel & (1 << UINTVAL (j)))
3601 all_operand1 = false;
3602 else
3603 all_operand0 = false;
3605 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3606 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3607 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3608 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3612 /* If we have two nested selects that are inverses of each
3613 other, replace them with the source operand. */
3614 if (GET_CODE (trueop0) == VEC_SELECT
3615 && GET_MODE (XEXP (trueop0, 0)) == mode)
3617 rtx op0_subop1 = XEXP (trueop0, 1);
3618 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3619 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3621 /* Apply the outer ordering vector to the inner one. (The inner
3622 ordering vector is expressly permitted to be of a different
3623 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3624 then the two VEC_SELECTs cancel. */
3625 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3627 rtx x = XVECEXP (trueop1, 0, i);
3628 if (!CONST_INT_P (x))
3629 return 0;
3630 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3631 if (!CONST_INT_P (y) || i != INTVAL (y))
3632 return 0;
3634 return XEXP (trueop0, 0);
3637 return 0;
3638 case VEC_CONCAT:
3640 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3641 ? GET_MODE (trueop0)
3642 : GET_MODE_INNER (mode));
3643 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3644 ? GET_MODE (trueop1)
3645 : GET_MODE_INNER (mode));
3647 gcc_assert (VECTOR_MODE_P (mode));
3648 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3649 == GET_MODE_SIZE (mode));
3651 if (VECTOR_MODE_P (op0_mode))
3652 gcc_assert (GET_MODE_INNER (mode)
3653 == GET_MODE_INNER (op0_mode));
3654 else
3655 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3657 if (VECTOR_MODE_P (op1_mode))
3658 gcc_assert (GET_MODE_INNER (mode)
3659 == GET_MODE_INNER (op1_mode));
3660 else
3661 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3663 if ((GET_CODE (trueop0) == CONST_VECTOR
3664 || CONST_SCALAR_INT_P (trueop0)
3665 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3666 && (GET_CODE (trueop1) == CONST_VECTOR
3667 || CONST_SCALAR_INT_P (trueop1)
3668 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3670 int elt_size = GET_MODE_UNIT_SIZE (mode);
3671 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3672 rtvec v = rtvec_alloc (n_elts);
3673 unsigned int i;
3674 unsigned in_n_elts = 1;
3676 if (VECTOR_MODE_P (op0_mode))
3677 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3678 for (i = 0; i < n_elts; i++)
3680 if (i < in_n_elts)
3682 if (!VECTOR_MODE_P (op0_mode))
3683 RTVEC_ELT (v, i) = trueop0;
3684 else
3685 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3687 else
3689 if (!VECTOR_MODE_P (op1_mode))
3690 RTVEC_ELT (v, i) = trueop1;
3691 else
3692 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3693 i - in_n_elts);
3697 return gen_rtx_CONST_VECTOR (mode, v);
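	  /* For illustration (modes are only an example):
	     (vec_concat:V2SI (const_int 1) (const_int 2)) folds to a
	     CONST_VECTOR holding { 1, 2 }.  */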
3700 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3701 Restrict the transformation to avoid generating a VEC_SELECT with a
3702 mode unrelated to its operand. */
3703 if (GET_CODE (trueop0) == VEC_SELECT
3704 && GET_CODE (trueop1) == VEC_SELECT
3705 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3706 && GET_MODE (XEXP (trueop0, 0)) == mode)
3708 rtx par0 = XEXP (trueop0, 1);
3709 rtx par1 = XEXP (trueop1, 1);
3710 int len0 = XVECLEN (par0, 0);
3711 int len1 = XVECLEN (par1, 0);
3712 rtvec vec = rtvec_alloc (len0 + len1);
3713 for (int i = 0; i < len0; i++)
3714 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3715 for (int i = 0; i < len1; i++)
3716 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3717 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3718 gen_rtx_PARALLEL (VOIDmode, vec));
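      /* For illustration: (vec_concat (vec_select X (parallel [0]))
				       (vec_select X (parallel [3])))
	 becomes (vec_select X (parallel [0 3])) when X already has the
	 result mode.  */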
3721 return 0;
3723 default:
3724 gcc_unreachable ();
3727 return 0;
3731 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3732 rtx op0, rtx op1)
3734 unsigned int width = GET_MODE_PRECISION (mode);
3736 if (VECTOR_MODE_P (mode)
3737 && code != VEC_CONCAT
3738 && GET_CODE (op0) == CONST_VECTOR
3739 && GET_CODE (op1) == CONST_VECTOR)
3741 unsigned n_elts = GET_MODE_NUNITS (mode);
3742 machine_mode op0mode = GET_MODE (op0);
3743 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3744 machine_mode op1mode = GET_MODE (op1);
3745 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3746 rtvec v = rtvec_alloc (n_elts);
3747 unsigned int i;
3749 gcc_assert (op0_n_elts == n_elts);
3750 gcc_assert (op1_n_elts == n_elts);
3751 for (i = 0; i < n_elts; i++)
3753 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3754 CONST_VECTOR_ELT (op0, i),
3755 CONST_VECTOR_ELT (op1, i));
3756 if (!x)
3757 return 0;
3758 RTVEC_ELT (v, i) = x;
3761 return gen_rtx_CONST_VECTOR (mode, v);
3764 if (VECTOR_MODE_P (mode)
3765 && code == VEC_CONCAT
3766 && (CONST_SCALAR_INT_P (op0)
3767 || GET_CODE (op0) == CONST_FIXED
3768 || CONST_DOUBLE_AS_FLOAT_P (op0))
3769 && (CONST_SCALAR_INT_P (op1)
3770 || CONST_DOUBLE_AS_FLOAT_P (op1)
3771 || GET_CODE (op1) == CONST_FIXED))
3773 unsigned n_elts = GET_MODE_NUNITS (mode);
3774 rtvec v = rtvec_alloc (n_elts);
3776 gcc_assert (n_elts >= 2);
3777 if (n_elts == 2)
3779 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3780 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3782 RTVEC_ELT (v, 0) = op0;
3783 RTVEC_ELT (v, 1) = op1;
3785 else
3787 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3788 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3789 unsigned i;
3791 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3792 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3793 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3795 for (i = 0; i < op0_n_elts; ++i)
3796 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3797 for (i = 0; i < op1_n_elts; ++i)
3798 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3801 return gen_rtx_CONST_VECTOR (mode, v);
3804 if (SCALAR_FLOAT_MODE_P (mode)
3805 && CONST_DOUBLE_AS_FLOAT_P (op0)
3806 && CONST_DOUBLE_AS_FLOAT_P (op1)
3807 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3809 if (code == AND
3810 || code == IOR
3811 || code == XOR)
3813 long tmp0[4];
3814 long tmp1[4];
3815 REAL_VALUE_TYPE r;
3816 int i;
3818 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3819 GET_MODE (op0));
3820 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3821 GET_MODE (op1));
3822 for (i = 0; i < 4; i++)
3824 switch (code)
3826 case AND:
3827 tmp0[i] &= tmp1[i];
3828 break;
3829 case IOR:
3830 tmp0[i] |= tmp1[i];
3831 break;
3832 case XOR:
3833 tmp0[i] ^= tmp1[i];
3834 break;
3835 default:
3836 gcc_unreachable ();
3839 real_from_target (&r, tmp0, mode);
3840 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3842 else
3844 REAL_VALUE_TYPE f0, f1, value, result;
3845 bool inexact;
3847 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3848 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3849 real_convert (&f0, mode, &f0);
3850 real_convert (&f1, mode, &f1);
3852 if (HONOR_SNANS (mode)
3853 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3854 return 0;
3856 if (code == DIV
3857 && REAL_VALUES_EQUAL (f1, dconst0)
3858 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3859 return 0;
3861 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3862 && flag_trapping_math
3863 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3865 int s0 = REAL_VALUE_NEGATIVE (f0);
3866 int s1 = REAL_VALUE_NEGATIVE (f1);
3868 switch (code)
3870 case PLUS:
3871 /* Inf + -Inf = NaN plus exception. */
3872 if (s0 != s1)
3873 return 0;
3874 break;
3875 case MINUS:
3876 /* Inf - Inf = NaN plus exception. */
3877 if (s0 == s1)
3878 return 0;
3879 break;
3880 case DIV:
3881 /* Inf / Inf = NaN plus exception. */
3882 return 0;
3883 default:
3884 break;
3888 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3889 && flag_trapping_math
3890 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3891 || (REAL_VALUE_ISINF (f1)
3892 && REAL_VALUES_EQUAL (f0, dconst0))))
3893 /* Inf * 0 = NaN plus exception. */
3894 return 0;
3896 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3897 &f0, &f1);
3898 real_convert (&result, mode, &value);
3900 /* Don't constant fold this floating point operation if
 3901 the result has overflowed and flag_trapping_math is set. */
3903 if (flag_trapping_math
3904 && MODE_HAS_INFINITIES (mode)
3905 && REAL_VALUE_ISINF (result)
3906 && !REAL_VALUE_ISINF (f0)
3907 && !REAL_VALUE_ISINF (f1))
3908 /* Overflow plus exception. */
3909 return 0;
3911 /* Don't constant fold this floating point operation if the
 3912 result may depend upon the run-time rounding mode and
3913 flag_rounding_math is set, or if GCC's software emulation
3914 is unable to accurately represent the result. */
3916 if ((flag_rounding_math
3917 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3918 && (inexact || !real_identical (&result, &value)))
3919 return NULL_RTX;
3921 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3925 /* We can fold some multi-word operations. */
3926 if ((GET_MODE_CLASS (mode) == MODE_INT
3927 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3928 && CONST_SCALAR_INT_P (op0)
3929 && CONST_SCALAR_INT_P (op1))
3931 wide_int result;
3932 bool overflow;
3933 rtx_mode_t pop0 = std::make_pair (op0, mode);
3934 rtx_mode_t pop1 = std::make_pair (op1, mode);
3936 #if TARGET_SUPPORTS_WIDE_INT == 0
3937 /* This assert keeps the simplification from producing a result
3938 that cannot be represented in a CONST_DOUBLE but a lot of
3939 upstream callers expect that this function never fails to
 3940 simplify something, so if you added this to the test
 3941 above the code would die later anyway. If this assert
 3942 fires, you just need to make the port support wide int. */
3943 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3944 #endif
3945 switch (code)
3947 case MINUS:
3948 result = wi::sub (pop0, pop1);
3949 break;
3951 case PLUS:
3952 result = wi::add (pop0, pop1);
3953 break;
3955 case MULT:
3956 result = wi::mul (pop0, pop1);
3957 break;
3959 case DIV:
3960 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3961 if (overflow)
3962 return NULL_RTX;
3963 break;
3965 case MOD:
3966 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3967 if (overflow)
3968 return NULL_RTX;
3969 break;
3971 case UDIV:
3972 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3973 if (overflow)
3974 return NULL_RTX;
3975 break;
3977 case UMOD:
3978 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3979 if (overflow)
3980 return NULL_RTX;
3981 break;
3983 case AND:
3984 result = wi::bit_and (pop0, pop1);
3985 break;
3987 case IOR:
3988 result = wi::bit_or (pop0, pop1);
3989 break;
3991 case XOR:
3992 result = wi::bit_xor (pop0, pop1);
3993 break;
3995 case SMIN:
3996 result = wi::smin (pop0, pop1);
3997 break;
3999 case SMAX:
4000 result = wi::smax (pop0, pop1);
4001 break;
4003 case UMIN:
4004 result = wi::umin (pop0, pop1);
4005 break;
4007 case UMAX:
4008 result = wi::umax (pop0, pop1);
4009 break;
4011 case LSHIFTRT:
4012 case ASHIFTRT:
4013 case ASHIFT:
4015 wide_int wop1 = pop1;
4016 if (SHIFT_COUNT_TRUNCATED)
4017 wop1 = wi::umod_trunc (wop1, width);
4018 else if (wi::geu_p (wop1, width))
4019 return NULL_RTX;
4021 switch (code)
4023 case LSHIFTRT:
4024 result = wi::lrshift (pop0, wop1);
4025 break;
4027 case ASHIFTRT:
4028 result = wi::arshift (pop0, wop1);
4029 break;
4031 case ASHIFT:
4032 result = wi::lshift (pop0, wop1);
4033 break;
4035 default:
4036 gcc_unreachable ();
4038 break;
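	  /* For illustration: in a 32-bit mode, (ashift (const_int 1)
	     (const_int 4)) folds to (const_int 16); an out-of-range shift
	     count is either reduced modulo the width (when
	     SHIFT_COUNT_TRUNCATED) or left unsimplified.  */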
4040 case ROTATE:
4041 case ROTATERT:
4043 if (wi::neg_p (pop1))
4044 return NULL_RTX;
4046 switch (code)
4048 case ROTATE:
4049 result = wi::lrotate (pop0, pop1);
4050 break;
4052 case ROTATERT:
4053 result = wi::rrotate (pop0, pop1);
4054 break;
4056 default:
4057 gcc_unreachable ();
4059 break;
4061 default:
4062 return NULL_RTX;
4064 return immed_wide_int_const (result, mode);
4067 return NULL_RTX;
4072 /* Return a positive integer if X should sort after Y. The value
4073 returned is 1 if and only if X and Y are both regs. */
4075 static int
4076 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4078 int result;
4080 result = (commutative_operand_precedence (y)
4081 - commutative_operand_precedence (x));
4082 if (result)
4083 return result + result;
4085 /* Group together equal REGs to do more simplification. */
4086 if (REG_P (x) && REG_P (y))
4087 return REGNO (x) > REGNO (y);
4089 return 0;
4092 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4093 operands may be another PLUS or MINUS.
 4096 Rather than test for specific cases, we do this by a brute-force method
4096 and do all possible simplifications until no more changes occur. Then
4097 we rebuild the operation.
4099 May return NULL_RTX when no changes were made. */
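/* For illustration (an assumed input): simplifying
   (plus (plus A (const_int 3)) (minus B (const_int 1))) first flattens the
   operands into the ops array as { A, +3, B, -1 }, then combines the
   constants, and finally rebuilds something equivalent to
   (plus (plus A B) (const_int 2)).  */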
4101 static rtx
4102 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4103 rtx op1)
4105 struct simplify_plus_minus_op_data
4107 rtx op;
4108 short neg;
4109 } ops[16];
4110 rtx result, tem;
4111 int n_ops = 2;
4112 int changed, n_constants, canonicalized = 0;
4113 int i, j;
4115 memset (ops, 0, sizeof ops);
4117 /* Set up the two operands and then expand them until nothing has been
4118 changed. If we run out of room in our array, give up; this should
4119 almost never happen. */
4121 ops[0].op = op0;
4122 ops[0].neg = 0;
4123 ops[1].op = op1;
4124 ops[1].neg = (code == MINUS);
4128 changed = 0;
4129 n_constants = 0;
4131 for (i = 0; i < n_ops; i++)
4133 rtx this_op = ops[i].op;
4134 int this_neg = ops[i].neg;
4135 enum rtx_code this_code = GET_CODE (this_op);
4137 switch (this_code)
4139 case PLUS:
4140 case MINUS:
4141 if (n_ops == ARRAY_SIZE (ops))
4142 return NULL_RTX;
4144 ops[n_ops].op = XEXP (this_op, 1);
4145 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4146 n_ops++;
4148 ops[i].op = XEXP (this_op, 0);
4149 changed = 1;
4150 /* If this operand was negated then we will potentially
4151 canonicalize the expression. Similarly if we don't
4152 place the operands adjacent we're re-ordering the
4153 expression and thus might be performing a
4154 canonicalization. Ignore register re-ordering.
4155 ??? It might be better to shuffle the ops array here,
4156 but then (plus (plus (A, B), plus (C, D))) wouldn't
4157 be seen as non-canonical. */
4158 if (this_neg
4159 || (i != n_ops - 2
4160 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4161 canonicalized = 1;
4162 break;
4164 case NEG:
4165 ops[i].op = XEXP (this_op, 0);
4166 ops[i].neg = ! this_neg;
4167 changed = 1;
4168 canonicalized = 1;
4169 break;
4171 case CONST:
4172 if (n_ops != ARRAY_SIZE (ops)
4173 && GET_CODE (XEXP (this_op, 0)) == PLUS
4174 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4175 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4177 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4178 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4179 ops[n_ops].neg = this_neg;
4180 n_ops++;
4181 changed = 1;
4182 canonicalized = 1;
4184 break;
4186 case NOT:
4187 /* ~a -> (-a - 1) */
4188 if (n_ops != ARRAY_SIZE (ops))
4190 ops[n_ops].op = CONSTM1_RTX (mode);
4191 ops[n_ops++].neg = this_neg;
4192 ops[i].op = XEXP (this_op, 0);
4193 ops[i].neg = !this_neg;
4194 changed = 1;
4195 canonicalized = 1;
4197 break;
4199 case CONST_INT:
4200 n_constants++;
4201 if (this_neg)
4203 ops[i].op = neg_const_int (mode, this_op);
4204 ops[i].neg = 0;
4205 changed = 1;
4206 canonicalized = 1;
4208 break;
4210 default:
4211 break;
4215 while (changed);
4217 if (n_constants > 1)
4218 canonicalized = 1;
4220 gcc_assert (n_ops >= 2);
4222 /* If we only have two operands, we can avoid the loops. */
4223 if (n_ops == 2)
4225 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4226 rtx lhs, rhs;
4228 /* Get the two operands. Be careful with the order, especially for
4229 the cases where code == MINUS. */
4230 if (ops[0].neg && ops[1].neg)
4232 lhs = gen_rtx_NEG (mode, ops[0].op);
4233 rhs = ops[1].op;
4235 else if (ops[0].neg)
4237 lhs = ops[1].op;
4238 rhs = ops[0].op;
4240 else
4242 lhs = ops[0].op;
4243 rhs = ops[1].op;
4246 return simplify_const_binary_operation (code, mode, lhs, rhs);
4249 /* Now simplify each pair of operands until nothing changes. */
4250 while (1)
4252 /* Insertion sort is good enough for a small array. */
4253 for (i = 1; i < n_ops; i++)
4255 struct simplify_plus_minus_op_data save;
4256 int cmp;
4258 j = i - 1;
4259 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4260 if (cmp <= 0)
4261 continue;
4262 /* Just swapping registers doesn't count as canonicalization. */
4263 if (cmp != 1)
4264 canonicalized = 1;
4266 save = ops[i];
4268 ops[j + 1] = ops[j];
4269 while (j--
4270 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4271 ops[j + 1] = save;
4274 changed = 0;
4275 for (i = n_ops - 1; i > 0; i--)
4276 for (j = i - 1; j >= 0; j--)
4278 rtx lhs = ops[j].op, rhs = ops[i].op;
4279 int lneg = ops[j].neg, rneg = ops[i].neg;
4281 if (lhs != 0 && rhs != 0)
4283 enum rtx_code ncode = PLUS;
4285 if (lneg != rneg)
4287 ncode = MINUS;
4288 if (lneg)
4289 std::swap (lhs, rhs);
4291 else if (swap_commutative_operands_p (lhs, rhs))
4292 std::swap (lhs, rhs);
4294 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4295 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4297 rtx tem_lhs, tem_rhs;
4299 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4300 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4301 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4302 tem_rhs);
4304 if (tem && !CONSTANT_P (tem))
4305 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4307 else
4308 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4310 if (tem)
4312 /* Reject "simplifications" that just wrap the two
4313 arguments in a CONST. Failure to do so can result
4314 in infinite recursion with simplify_binary_operation
4315 when it calls us to simplify CONST operations.
4316 Also, if we find such a simplification, don't try
 4317 any more combinations with this rhs: we must have
 4318 something like symbol+offset, i.e. one of the
4319 trivial CONST expressions we handle later. */
4320 if (GET_CODE (tem) == CONST
4321 && GET_CODE (XEXP (tem, 0)) == ncode
4322 && XEXP (XEXP (tem, 0), 0) == lhs
4323 && XEXP (XEXP (tem, 0), 1) == rhs)
4324 break;
4325 lneg &= rneg;
4326 if (GET_CODE (tem) == NEG)
4327 tem = XEXP (tem, 0), lneg = !lneg;
4328 if (CONST_INT_P (tem) && lneg)
4329 tem = neg_const_int (mode, tem), lneg = 0;
4331 ops[i].op = tem;
4332 ops[i].neg = lneg;
4333 ops[j].op = NULL_RTX;
4334 changed = 1;
4335 canonicalized = 1;
4340 if (!changed)
4341 break;
4343 /* Pack all the operands to the lower-numbered entries. */
4344 for (i = 0, j = 0; j < n_ops; j++)
4345 if (ops[j].op)
4347 ops[i] = ops[j];
4348 i++;
4350 n_ops = i;
4353 /* If nothing changed, fail. */
4354 if (!canonicalized)
4355 return NULL_RTX;
4357 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4358 if (n_ops == 2
4359 && CONST_INT_P (ops[1].op)
4360 && CONSTANT_P (ops[0].op)
4361 && ops[0].neg)
4362 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4364 /* We suppressed creation of trivial CONST expressions in the
4365 combination loop to avoid recursion. Create one manually now.
4366 The combination loop should have ensured that there is exactly
4367 one CONST_INT, and the sort will have ensured that it is last
4368 in the array and that any other constant will be next-to-last. */
4370 if (n_ops > 1
4371 && CONST_INT_P (ops[n_ops - 1].op)
4372 && CONSTANT_P (ops[n_ops - 2].op))
4374 rtx value = ops[n_ops - 1].op;
4375 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4376 value = neg_const_int (mode, value);
4377 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4378 INTVAL (value));
4379 n_ops--;
4382 /* Put a non-negated operand first, if possible. */
4384 for (i = 0; i < n_ops && ops[i].neg; i++)
4385 continue;
4386 if (i == n_ops)
4387 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4388 else if (i != 0)
4390 tem = ops[0].op;
4391 ops[0] = ops[i];
4392 ops[i].op = tem;
4393 ops[i].neg = 1;
4396 /* Now make the result by performing the requested operations. */
4397 result = ops[0].op;
4398 for (i = 1; i < n_ops; i++)
4399 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4400 mode, result, ops[i].op);
4402 return result;
4405 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4406 static bool
4407 plus_minus_operand_p (const_rtx x)
4409 return GET_CODE (x) == PLUS
4410 || GET_CODE (x) == MINUS
4411 || (GET_CODE (x) == CONST
4412 && GET_CODE (XEXP (x, 0)) == PLUS
4413 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4414 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4417 /* Like simplify_binary_operation except used for relational operators.
 4418 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 4419 not both be VOIDmode.
 4421 CMP_MODE specifies the mode in which the comparison is done, so it is
4422 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4423 the operands or, if both are VOIDmode, the operands are compared in
4424 "infinite precision". */
4426 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4427 machine_mode cmp_mode, rtx op0, rtx op1)
4429 rtx tem, trueop0, trueop1;
4431 if (cmp_mode == VOIDmode)
4432 cmp_mode = GET_MODE (op0);
4433 if (cmp_mode == VOIDmode)
4434 cmp_mode = GET_MODE (op1);
4436 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4437 if (tem)
4439 if (SCALAR_FLOAT_MODE_P (mode))
4441 if (tem == const0_rtx)
4442 return CONST0_RTX (mode);
4443 #ifdef FLOAT_STORE_FLAG_VALUE
4445 REAL_VALUE_TYPE val;
4446 val = FLOAT_STORE_FLAG_VALUE (mode);
4447 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4449 #else
4450 return NULL_RTX;
4451 #endif
4453 if (VECTOR_MODE_P (mode))
4455 if (tem == const0_rtx)
4456 return CONST0_RTX (mode);
4457 #ifdef VECTOR_STORE_FLAG_VALUE
4459 int i, units;
4460 rtvec v;
4462 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4463 if (val == NULL_RTX)
4464 return NULL_RTX;
4465 if (val == const1_rtx)
4466 return CONST1_RTX (mode);
4468 units = GET_MODE_NUNITS (mode);
4469 v = rtvec_alloc (units);
4470 for (i = 0; i < units; i++)
4471 RTVEC_ELT (v, i) = val;
4472 return gen_rtx_raw_CONST_VECTOR (mode, v);
4474 #else
4475 return NULL_RTX;
4476 #endif
4479 return tem;
4482 /* For the following tests, ensure const0_rtx is op1. */
4483 if (swap_commutative_operands_p (op0, op1)
4484 || (op0 == const0_rtx && op1 != const0_rtx))
4485 std::swap (op0, op1), code = swap_condition (code);
4487 /* If op0 is a compare, extract the comparison arguments from it. */
4488 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4489 return simplify_gen_relational (code, mode, VOIDmode,
4490 XEXP (op0, 0), XEXP (op0, 1));
4492 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4493 || CC0_P (op0))
4494 return NULL_RTX;
4496 trueop0 = avoid_constant_pool_reference (op0);
4497 trueop1 = avoid_constant_pool_reference (op1);
4498 return simplify_relational_operation_1 (code, mode, cmp_mode,
4499 trueop0, trueop1);
4502 /* This part of simplify_relational_operation is only used when CMP_MODE
4503 is not in class MODE_CC (i.e. it is a real comparison).
 4505 MODE is the mode of the result, while CMP_MODE specifies the mode
 4506 in which the comparison is done, so it is the mode of the operands. */
4508 static rtx
4509 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4510 machine_mode cmp_mode, rtx op0, rtx op1)
4512 enum rtx_code op0code = GET_CODE (op0);
4514 if (op1 == const0_rtx && COMPARISON_P (op0))
4516 /* If op0 is a comparison, extract the comparison arguments
4517 from it. */
4518 if (code == NE)
4520 if (GET_MODE (op0) == mode)
4521 return simplify_rtx (op0);
4522 else
4523 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4524 XEXP (op0, 0), XEXP (op0, 1));
4526 else if (code == EQ)
4528 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4529 if (new_code != UNKNOWN)
4530 return simplify_gen_relational (new_code, mode, VOIDmode,
4531 XEXP (op0, 0), XEXP (op0, 1));
4535 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4536 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4537 if ((code == LTU || code == GEU)
4538 && GET_CODE (op0) == PLUS
4539 && CONST_INT_P (XEXP (op0, 1))
4540 && (rtx_equal_p (op1, XEXP (op0, 0))
4541 || rtx_equal_p (op1, XEXP (op0, 1)))
4542 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4543 && XEXP (op0, 1) != const0_rtx)
4545 rtx new_cmp
4546 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4547 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4548 cmp_mode, XEXP (op0, 0), new_cmp);
4551 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4552 if ((code == LTU || code == GEU)
4553 && GET_CODE (op0) == PLUS
4554 && rtx_equal_p (op1, XEXP (op0, 1))
4555 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4556 && !rtx_equal_p (op1, XEXP (op0, 0)))
4557 return simplify_gen_relational (code, mode, cmp_mode, op0,
4558 copy_rtx (XEXP (op0, 0)));
4560 if (op1 == const0_rtx)
4562 /* Canonicalize (GTU x 0) as (NE x 0). */
4563 if (code == GTU)
4564 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4565 /* Canonicalize (LEU x 0) as (EQ x 0). */
4566 if (code == LEU)
4567 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4569 else if (op1 == const1_rtx)
4571 switch (code)
4573 case GE:
4574 /* Canonicalize (GE x 1) as (GT x 0). */
4575 return simplify_gen_relational (GT, mode, cmp_mode,
4576 op0, const0_rtx);
4577 case GEU:
4578 /* Canonicalize (GEU x 1) as (NE x 0). */
4579 return simplify_gen_relational (NE, mode, cmp_mode,
4580 op0, const0_rtx);
4581 case LT:
4582 /* Canonicalize (LT x 1) as (LE x 0). */
4583 return simplify_gen_relational (LE, mode, cmp_mode,
4584 op0, const0_rtx);
4585 case LTU:
4586 /* Canonicalize (LTU x 1) as (EQ x 0). */
4587 return simplify_gen_relational (EQ, mode, cmp_mode,
4588 op0, const0_rtx);
4589 default:
4590 break;
4593 else if (op1 == constm1_rtx)
4595 /* Canonicalize (LE x -1) as (LT x 0). */
4596 if (code == LE)
4597 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4598 /* Canonicalize (GT x -1) as (GE x 0). */
4599 if (code == GT)
4600 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4603 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4604 if ((code == EQ || code == NE)
4605 && (op0code == PLUS || op0code == MINUS)
4606 && CONSTANT_P (op1)
4607 && CONSTANT_P (XEXP (op0, 1))
4608 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4610 rtx x = XEXP (op0, 0);
4611 rtx c = XEXP (op0, 1);
4612 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4613 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
 4615 /* Detect an infinite recursion, where we oscillate at this
4616 simplification case between:
4617 A + B == C <---> C - B == A,
4618 where A, B, and C are all constants with non-simplifiable expressions,
4619 usually SYMBOL_REFs. */
4620 if (GET_CODE (tem) == invcode
4621 && CONSTANT_P (x)
4622 && rtx_equal_p (c, XEXP (tem, 1)))
4623 return NULL_RTX;
4625 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
 4628 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4629 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4630 if (code == NE
4631 && op1 == const0_rtx
4632 && GET_MODE_CLASS (mode) == MODE_INT
4633 && cmp_mode != VOIDmode
4634 /* ??? Work-around BImode bugs in the ia64 backend. */
4635 && mode != BImode
4636 && cmp_mode != BImode
4637 && nonzero_bits (op0, cmp_mode) == 1
4638 && STORE_FLAG_VALUE == 1)
4639 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4640 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4641 : lowpart_subreg (mode, op0, cmp_mode);
4643 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4644 if ((code == EQ || code == NE)
4645 && op1 == const0_rtx
4646 && op0code == XOR)
4647 return simplify_gen_relational (code, mode, cmp_mode,
4648 XEXP (op0, 0), XEXP (op0, 1));
4650 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4651 if ((code == EQ || code == NE)
4652 && op0code == XOR
4653 && rtx_equal_p (XEXP (op0, 0), op1)
4654 && !side_effects_p (XEXP (op0, 0)))
4655 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4656 CONST0_RTX (mode));
4658 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4659 if ((code == EQ || code == NE)
4660 && op0code == XOR
4661 && rtx_equal_p (XEXP (op0, 1), op1)
4662 && !side_effects_p (XEXP (op0, 1)))
4663 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4664 CONST0_RTX (mode));
4666 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4667 if ((code == EQ || code == NE)
4668 && op0code == XOR
4669 && CONST_SCALAR_INT_P (op1)
4670 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4671 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4672 simplify_gen_binary (XOR, cmp_mode,
4673 XEXP (op0, 1), op1));
4675 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4676 can be implemented with a BICS instruction on some targets, or
4677 constant-folded if y is a constant. */
4678 if ((code == EQ || code == NE)
4679 && op0code == AND
4680 && rtx_equal_p (XEXP (op0, 0), op1)
4681 && !side_effects_p (op1)
4682 && op1 != CONST0_RTX (cmp_mode))
4684 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4685 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4687 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4688 CONST0_RTX (cmp_mode));
4691 /* Likewise for (eq/ne (and x y) y). */
4692 if ((code == EQ || code == NE)
4693 && op0code == AND
4694 && rtx_equal_p (XEXP (op0, 1), op1)
4695 && !side_effects_p (op1)
4696 && op1 != CONST0_RTX (cmp_mode))
4698 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4699 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4701 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4702 CONST0_RTX (cmp_mode));
4705 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4706 if ((code == EQ || code == NE)
4707 && GET_CODE (op0) == BSWAP
4708 && CONST_SCALAR_INT_P (op1))
4709 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4710 simplify_gen_unary (BSWAP, cmp_mode,
4711 op1, cmp_mode));
4713 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4714 if ((code == EQ || code == NE)
4715 && GET_CODE (op0) == BSWAP
4716 && GET_CODE (op1) == BSWAP)
4717 return simplify_gen_relational (code, mode, cmp_mode,
4718 XEXP (op0, 0), XEXP (op1, 0));
4720 if (op0code == POPCOUNT && op1 == const0_rtx)
4721 switch (code)
4723 case EQ:
4724 case LE:
4725 case LEU:
4726 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4727 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4728 XEXP (op0, 0), const0_rtx);
4730 case NE:
4731 case GT:
4732 case GTU:
4733 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4734 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4735 XEXP (op0, 0), const0_rtx);
4737 default:
4738 break;
4741 return NULL_RTX;
4744 enum
4746 CMP_EQ = 1,
4747 CMP_LT = 2,
4748 CMP_GT = 4,
4749 CMP_LTU = 8,
4750 CMP_GTU = 16
4754 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
 4755 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
 4756 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4757 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4758 For floating-point comparisons, assume that the operands were ordered. */
4760 static rtx
4761 comparison_result (enum rtx_code code, int known_results)
4763 switch (code)
4765 case EQ:
4766 case UNEQ:
4767 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4768 case NE:
4769 case LTGT:
4770 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4772 case LT:
4773 case UNLT:
4774 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4775 case GE:
4776 case UNGE:
4777 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4779 case GT:
4780 case UNGT:
4781 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4782 case LE:
4783 case UNLE:
4784 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4786 case LTU:
4787 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4788 case GEU:
4789 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4791 case GTU:
4792 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4793 case LEU:
4794 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4796 case ORDERED:
4797 return const_true_rtx;
4798 case UNORDERED:
4799 return const0_rtx;
4800 default:
4801 gcc_unreachable ();
4805 /* Check if the given comparison (done in the given MODE) is actually
 4806 a tautology or a contradiction. If the mode is VOIDmode, the
4807 comparison is done in "infinite precision". If no simplification
4808 is possible, this function returns zero. Otherwise, it returns
4809 either const_true_rtx or const0_rtx. */
4812 simplify_const_relational_operation (enum rtx_code code,
4813 machine_mode mode,
4814 rtx op0, rtx op1)
4816 rtx tem;
4817 rtx trueop0;
4818 rtx trueop1;
4820 gcc_assert (mode != VOIDmode
4821 || (GET_MODE (op0) == VOIDmode
4822 && GET_MODE (op1) == VOIDmode));
4824 /* If op0 is a compare, extract the comparison arguments from it. */
4825 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4827 op1 = XEXP (op0, 1);
4828 op0 = XEXP (op0, 0);
4830 if (GET_MODE (op0) != VOIDmode)
4831 mode = GET_MODE (op0);
4832 else if (GET_MODE (op1) != VOIDmode)
4833 mode = GET_MODE (op1);
4834 else
4835 return 0;
4838 /* We can't simplify MODE_CC values since we don't know what the
4839 actual comparison is. */
4840 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4841 return 0;
4843 /* Make sure the constant is second. */
4844 if (swap_commutative_operands_p (op0, op1))
4846 std::swap (op0, op1);
4847 code = swap_condition (code);
4850 trueop0 = avoid_constant_pool_reference (op0);
4851 trueop1 = avoid_constant_pool_reference (op1);
 4853 /* For integer comparisons of A and B, maybe we can simplify A - B and
 4854 then simplify a comparison of that with zero. If A and B are both either
4855 a register or a CONST_INT, this can't help; testing for these cases will
4856 prevent infinite recursion here and speed things up.
4858 We can only do this for EQ and NE comparisons as otherwise we may
 4859 lose or introduce overflow, which we cannot disregard as undefined, since
 4860 we do not know the signedness of the operation on either the left or
4861 the right hand side of the comparison. */
4863 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4864 && (code == EQ || code == NE)
4865 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4866 && (REG_P (op1) || CONST_INT_P (trueop1)))
4867 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4868 /* We cannot do this if tem is a nonzero address. */
4869 && ! nonzero_address_p (tem))
4870 return simplify_const_relational_operation (signed_condition (code),
4871 mode, tem, const0_rtx);
4873 if (! HONOR_NANS (mode) && code == ORDERED)
4874 return const_true_rtx;
4876 if (! HONOR_NANS (mode) && code == UNORDERED)
4877 return const0_rtx;
4879 /* For modes without NaNs, if the two operands are equal, we know the
4880 result except if they have side-effects. Even with NaNs we know
4881 the result of unordered comparisons and, if signaling NaNs are
4882 irrelevant, also the result of LT/GT/LTGT. */
4883 if ((! HONOR_NANS (trueop0)
4884 || code == UNEQ || code == UNLE || code == UNGE
4885 || ((code == LT || code == GT || code == LTGT)
4886 && ! HONOR_SNANS (trueop0)))
4887 && rtx_equal_p (trueop0, trueop1)
4888 && ! side_effects_p (trueop0))
4889 return comparison_result (code, CMP_EQ);
4891 /* If the operands are floating-point constants, see if we can fold
4892 the result. */
4893 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4894 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4895 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4897 REAL_VALUE_TYPE d0, d1;
4899 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4900 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4902 /* Comparisons are unordered iff at least one of the values is NaN. */
4903 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4904 switch (code)
4906 case UNEQ:
4907 case UNLT:
4908 case UNGT:
4909 case UNLE:
4910 case UNGE:
4911 case NE:
4912 case UNORDERED:
4913 return const_true_rtx;
4914 case EQ:
4915 case LT:
4916 case GT:
4917 case LE:
4918 case GE:
4919 case LTGT:
4920 case ORDERED:
4921 return const0_rtx;
4922 default:
4923 return 0;
4926 return comparison_result (code,
4927 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4928 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4931 /* Otherwise, see if the operands are both integers. */
4932 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4933 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4935 /* It would be nice if we really had a mode here. However, the
4936 largest int representable on the target is as good as
4937 infinite. */
4938 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4939 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4940 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4942 if (wi::eq_p (ptrueop0, ptrueop1))
4943 return comparison_result (code, CMP_EQ);
4944 else
4946 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4947 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4948 return comparison_result (code, cr);
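	  /* For illustration: comparing (const_int 5) with (const_int -1),
	     5 > -1 in a signed sense, but -1 is the maximum value in an
	     unsigned sense, so CR would be CMP_GT | CMP_LTU; GT then folds
	     to true while GTU folds to false.  */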
4952 /* Optimize comparisons with upper and lower bounds. */
4953 if (HWI_COMPUTABLE_MODE_P (mode)
4954 && CONST_INT_P (trueop1)
4955 && !side_effects_p (trueop0))
4957 int sign;
4958 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4959 HOST_WIDE_INT val = INTVAL (trueop1);
4960 HOST_WIDE_INT mmin, mmax;
4962 if (code == GEU
4963 || code == LEU
4964 || code == GTU
4965 || code == LTU)
4966 sign = 0;
4967 else
4968 sign = 1;
4970 /* Get a reduced range if the sign bit is zero. */
4971 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4973 mmin = 0;
4974 mmax = nonzero;
4976 else
4978 rtx mmin_rtx, mmax_rtx;
4979 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4981 mmin = INTVAL (mmin_rtx);
4982 mmax = INTVAL (mmax_rtx);
4983 if (sign)
4985 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4987 mmin >>= (sign_copies - 1);
4988 mmax >>= (sign_copies - 1);
4992 switch (code)
4994 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4995 case GEU:
4996 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4997 return const_true_rtx;
4998 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4999 return const0_rtx;
5000 break;
5001 case GE:
5002 if (val <= mmin)
5003 return const_true_rtx;
5004 if (val > mmax)
5005 return const0_rtx;
5006 break;
5008 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5009 case LEU:
5010 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5011 return const_true_rtx;
5012 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5013 return const0_rtx;
5014 break;
5015 case LE:
5016 if (val >= mmax)
5017 return const_true_rtx;
5018 if (val < mmin)
5019 return const0_rtx;
5020 break;
5022 case EQ:
5023 /* x == y is always false for y out of range. */
5024 if (val < mmin || val > mmax)
5025 return const0_rtx;
5026 break;
5028 /* x > y is always false for y >= mmax, always true for y < mmin. */
5029 case GTU:
5030 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5031 return const0_rtx;
5032 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5033 return const_true_rtx;
5034 break;
5035 case GT:
5036 if (val >= mmax)
5037 return const0_rtx;
5038 if (val < mmin)
5039 return const_true_rtx;
5040 break;
5042 /* x < y is always false for y <= mmin, always true for y > mmax. */
5043 case LTU:
5044 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5045 return const0_rtx;
5046 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5047 return const_true_rtx;
5048 break;
5049 case LT:
5050 if (val <= mmin)
5051 return const0_rtx;
5052 if (val > mmax)
5053 return const_true_rtx;
5054 break;
5056 case NE:
5057 /* x != y is always true for y out of range. */
5058 if (val < mmin || val > mmax)
5059 return const_true_rtx;
5060 break;
5062 default:
5063 break;
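	  /* For illustration: if nonzero_bits shows TRUEOP0 fits in 8 bits
	     (so mmax is 255), then (gtu x (const_int 255)) folds to
	     const0_rtx and (leu x (const_int 255)) folds to
	     const_true_rtx.  */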
5067 /* Optimize integer comparisons with zero. */
5068 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5070 /* Some addresses are known to be nonzero. We don't know
5071 their sign, but equality comparisons are known. */
5072 if (nonzero_address_p (trueop0))
5074 if (code == EQ || code == LEU)
5075 return const0_rtx;
5076 if (code == NE || code == GTU)
5077 return const_true_rtx;
5080 /* See if the first operand is an IOR with a constant. If so, we
5081 may be able to determine the result of this comparison. */
5082 if (GET_CODE (op0) == IOR)
5084 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5085 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5087 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5088 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5089 && (UINTVAL (inner_const)
5090 & ((unsigned HOST_WIDE_INT) 1
5091 << sign_bitnum)));
5093 switch (code)
5095 case EQ:
5096 case LEU:
5097 return const0_rtx;
5098 case NE:
5099 case GTU:
5100 return const_true_rtx;
5101 case LT:
5102 case LE:
5103 if (has_sign)
5104 return const_true_rtx;
5105 break;
5106 case GT:
5107 case GE:
5108 if (has_sign)
5109 return const0_rtx;
5110 break;
5111 default:
5112 break;
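	      /* For illustration: (ior x (const_int 4)) is known to be
		 nonzero, so comparing it against zero with EQ folds to
		 const0_rtx and with NE to const_true_rtx; LT/GE can only be
		 decided when the constant has the sign bit set.  */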
5118 /* Optimize comparison of ABS with zero. */
5119 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5120 && (GET_CODE (trueop0) == ABS
5121 || (GET_CODE (trueop0) == FLOAT_EXTEND
5122 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5124 switch (code)
5126 case LT:
5127 /* Optimize abs(x) < 0.0. */
5128 if (!HONOR_SNANS (mode)
5129 && (!INTEGRAL_MODE_P (mode)
5130 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5132 if (INTEGRAL_MODE_P (mode)
5133 && (issue_strict_overflow_warning
5134 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5135 warning (OPT_Wstrict_overflow,
5136 ("assuming signed overflow does not occur when "
5137 "assuming abs (x) < 0 is false"));
5138 return const0_rtx;
5140 break;
5142 case GE:
5143 /* Optimize abs(x) >= 0.0. */
5144 if (!HONOR_NANS (mode)
5145 && (!INTEGRAL_MODE_P (mode)
5146 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5148 if (INTEGRAL_MODE_P (mode)
5149 && (issue_strict_overflow_warning
5150 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5151 warning (OPT_Wstrict_overflow,
5152 ("assuming signed overflow does not occur when "
5153 "assuming abs (x) >= 0 is true"));
5154 return const_true_rtx;
5156 break;
5158 case UNGE:
5159 /* Optimize ! (abs(x) < 0.0). */
5160 return const_true_rtx;
5162 default:
5163 break;
5167 return 0;
5170 /* Simplify CODE, an operation with result mode MODE and three operands,
5171 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
 5172 a constant. Return 0 if no simplification is possible. */
5175 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5176 machine_mode op0_mode, rtx op0, rtx op1,
5177 rtx op2)
5179 unsigned int width = GET_MODE_PRECISION (mode);
5180 bool any_change = false;
5181 rtx tem, trueop2;
5183 /* VOIDmode means "infinite" precision. */
5184 if (width == 0)
5185 width = HOST_BITS_PER_WIDE_INT;
5187 switch (code)
5189 case FMA:
5190 /* Simplify negations around the multiplication. */
5191 /* -a * -b + c => a * b + c. */
5192 if (GET_CODE (op0) == NEG)
5194 tem = simplify_unary_operation (NEG, mode, op1, mode);
5195 if (tem)
5196 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5198 else if (GET_CODE (op1) == NEG)
5200 tem = simplify_unary_operation (NEG, mode, op0, mode);
5201 if (tem)
5202 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5205 /* Canonicalize the two multiplication operands. */
5206 /* a * -b + c => -b * a + c. */
5207 if (swap_commutative_operands_p (op0, op1))
5208 std::swap (op0, op1), any_change = true;
5210 if (any_change)
5211 return gen_rtx_FMA (mode, op0, op1, op2);
5212 return NULL_RTX;
5214 case SIGN_EXTRACT:
5215 case ZERO_EXTRACT:
5216 if (CONST_INT_P (op0)
5217 && CONST_INT_P (op1)
5218 && CONST_INT_P (op2)
5219 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5220 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5222 /* Extracting a bit-field from a constant */
5223 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5224 HOST_WIDE_INT op1val = INTVAL (op1);
5225 HOST_WIDE_INT op2val = INTVAL (op2);
5226 if (BITS_BIG_ENDIAN)
5227 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5228 else
5229 val >>= op2val;
5231 if (HOST_BITS_PER_WIDE_INT != op1val)
5233 /* First zero-extend. */
5234 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5235 /* If desired, propagate sign bit. */
5236 if (code == SIGN_EXTRACT
5237 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5238 != 0)
5239 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5242 return gen_int_mode (val, mode);
5244 break;
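      /* For illustration: on a !BITS_BIG_ENDIAN target,
	 (zero_extract (const_int 0xf0) (const_int 4) (const_int 4))
	 extracts the high nibble and folds to (const_int 15).  */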
5246 case IF_THEN_ELSE:
5247 if (CONST_INT_P (op0))
5248 return op0 != const0_rtx ? op1 : op2;
5250 /* Convert c ? a : a into "a". */
5251 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5252 return op1;
5254 /* Convert a != b ? a : b into "a". */
5255 if (GET_CODE (op0) == NE
5256 && ! side_effects_p (op0)
5257 && ! HONOR_NANS (mode)
5258 && ! HONOR_SIGNED_ZEROS (mode)
5259 && ((rtx_equal_p (XEXP (op0, 0), op1)
5260 && rtx_equal_p (XEXP (op0, 1), op2))
5261 || (rtx_equal_p (XEXP (op0, 0), op2)
5262 && rtx_equal_p (XEXP (op0, 1), op1))))
5263 return op1;
5265 /* Convert a == b ? a : b into "b". */
5266 if (GET_CODE (op0) == EQ
5267 && ! side_effects_p (op0)
5268 && ! HONOR_NANS (mode)
5269 && ! HONOR_SIGNED_ZEROS (mode)
5270 && ((rtx_equal_p (XEXP (op0, 0), op1)
5271 && rtx_equal_p (XEXP (op0, 1), op2))
5272 || (rtx_equal_p (XEXP (op0, 0), op2)
5273 && rtx_equal_p (XEXP (op0, 1), op1))))
5274 return op2;
5276 /* Convert (!c) != {0,...,0} ? a : b into
5277 c != {0,...,0} ? b : a for vector modes. */
5278 if (VECTOR_MODE_P (GET_MODE (op1))
5279 && GET_CODE (op0) == NE
5280 && GET_CODE (XEXP (op0, 0)) == NOT
5281 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5283 rtx cv = XEXP (op0, 1);
5284 int nunits = CONST_VECTOR_NUNITS (cv);
5285 bool ok = true;
5286 for (int i = 0; i < nunits; ++i)
5287 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5289 ok = false;
5290 break;
5292 if (ok)
5294 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5295 XEXP (XEXP (op0, 0), 0),
5296 XEXP (op0, 1));
5297 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5298 return retval;
5302 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5304 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5305 ? GET_MODE (XEXP (op0, 1))
5306 : GET_MODE (XEXP (op0, 0)));
5307 rtx temp;
5309 /* Look for happy constants in op1 and op2. */
5310 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5312 HOST_WIDE_INT t = INTVAL (op1);
5313 HOST_WIDE_INT f = INTVAL (op2);
5315 if (t == STORE_FLAG_VALUE && f == 0)
5316 code = GET_CODE (op0);
5317 else if (t == 0 && f == STORE_FLAG_VALUE)
5319 enum rtx_code tmp;
5320 tmp = reversed_comparison_code (op0, NULL_RTX);
5321 if (tmp == UNKNOWN)
5322 break;
5323 code = tmp;
5325 else
5326 break;
5328 return simplify_gen_relational (code, mode, cmp_mode,
5329 XEXP (op0, 0), XEXP (op0, 1));
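	    /* For illustration: with STORE_FLAG_VALUE == 1,
	       (if_then_else (lt x y) (const_int 1) (const_int 0)) folds to
	       (lt x y), and swapped arms use the reversed condition.  */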
5332 if (cmp_mode == VOIDmode)
5333 cmp_mode = op0_mode;
5334 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5335 cmp_mode, XEXP (op0, 0),
5336 XEXP (op0, 1));
5338 /* See if any simplifications were possible. */
5339 if (temp)
5341 if (CONST_INT_P (temp))
5342 return temp == const0_rtx ? op2 : op1;
5343 else if (temp)
5344 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5347 break;
5349 case VEC_MERGE:
5350 gcc_assert (GET_MODE (op0) == mode);
5351 gcc_assert (GET_MODE (op1) == mode);
5352 gcc_assert (VECTOR_MODE_P (mode));
5353 trueop2 = avoid_constant_pool_reference (op2);
5354 if (CONST_INT_P (trueop2))
5356 int elt_size = GET_MODE_UNIT_SIZE (mode);
5357 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5358 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5359 unsigned HOST_WIDE_INT mask;
5360 if (n_elts == HOST_BITS_PER_WIDE_INT)
5361 mask = -1;
5362 else
5363 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5365 if (!(sel & mask) && !side_effects_p (op0))
5366 return op1;
5367 if ((sel & mask) == mask && !side_effects_p (op1))
5368 return op0;
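	  /* For illustration: a selector of zero takes every element from
	     the second operand, so (vec_merge a b (const_int 0)) is just b,
	     while an all-ones selector (within MASK) yields a.  */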
5370 rtx trueop0 = avoid_constant_pool_reference (op0);
5371 rtx trueop1 = avoid_constant_pool_reference (op1);
5372 if (GET_CODE (trueop0) == CONST_VECTOR
5373 && GET_CODE (trueop1) == CONST_VECTOR)
5375 rtvec v = rtvec_alloc (n_elts);
5376 unsigned int i;
5378 for (i = 0; i < n_elts; i++)
5379 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5380 ? CONST_VECTOR_ELT (trueop0, i)
5381 : CONST_VECTOR_ELT (trueop1, i));
5382 return gen_rtx_CONST_VECTOR (mode, v);
5385 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5386 if no element from a appears in the result. */
5387 if (GET_CODE (op0) == VEC_MERGE)
5389 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5390 if (CONST_INT_P (tem))
5392 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5393 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5394 return simplify_gen_ternary (code, mode, mode,
5395 XEXP (op0, 1), op1, op2);
5396 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5397 return simplify_gen_ternary (code, mode, mode,
5398 XEXP (op0, 0), op1, op2);
5401 if (GET_CODE (op1) == VEC_MERGE)
5403 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5404 if (CONST_INT_P (tem))
5406 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5407 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5408 return simplify_gen_ternary (code, mode, mode,
5409 op0, XEXP (op1, 1), op2);
5410 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5411 return simplify_gen_ternary (code, mode, mode,
5412 op0, XEXP (op1, 0), op2);
5416 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5417 with a. */
5418 if (GET_CODE (op0) == VEC_DUPLICATE
5419 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5420 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5421 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5423 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5424 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5426 if (XEXP (XEXP (op0, 0), 0) == op1
5427 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5428 return op1;
5433 if (rtx_equal_p (op0, op1)
5434 && !side_effects_p (op2) && !side_effects_p (op1))
5435 return op0;
5437 break;
5439 default:
5440 gcc_unreachable ();
5443 return 0;
5446 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5447 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5448 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5450 Works by unpacking OP into a collection of 8-bit values
5451 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5452 and then repacking them again for OUTERMODE. */
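/* For illustration (little-endian target assumed): a SUBREG:HI of
   (const_int 0x12345678) in SImode at byte 2 selects the two high bytes
   and evaluates to (const_int 0x1234).  */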
5454 static rtx
5455 simplify_immed_subreg (machine_mode outermode, rtx op,
5456 machine_mode innermode, unsigned int byte)
5458 enum {
5459 value_bit = 8,
5460 value_mask = (1 << value_bit) - 1
5462 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5463 int value_start;
5464 int i;
5465 int elem;
5467 int num_elem;
5468 rtx * elems;
5469 int elem_bitsize;
5470 rtx result_s;
5471 rtvec result_v = NULL;
5472 enum mode_class outer_class;
5473 machine_mode outer_submode;
5474 int max_bitsize;
5476 /* Some ports misuse CCmode. */
5477 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5478 return op;
5480 /* We have no way to represent a complex constant at the rtl level. */
5481 if (COMPLEX_MODE_P (outermode))
5482 return NULL_RTX;
5484 /* We support any size mode. */
5485 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5486 GET_MODE_BITSIZE (innermode));
5488 /* Unpack the value. */
5490 if (GET_CODE (op) == CONST_VECTOR)
5492 num_elem = CONST_VECTOR_NUNITS (op);
5493 elems = &CONST_VECTOR_ELT (op, 0);
5494 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5496 else
5498 num_elem = 1;
5499 elems = &op;
5500 elem_bitsize = max_bitsize;
5502 /* If this asserts, it is too complicated; reducing value_bit may help. */
5503 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5504 /* I don't know how to handle endianness of sub-units. */
5505 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5507 for (elem = 0; elem < num_elem; elem++)
5509 unsigned char * vp;
5510 rtx el = elems[elem];
5512 /* Vectors are kept in target memory order. (This is probably
5513 a mistake.) */
5515 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5516 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5517 / BITS_PER_UNIT);
5518 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5519 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5520 unsigned bytele = (subword_byte % UNITS_PER_WORD
5521 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5522 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5525 switch (GET_CODE (el))
5527 case CONST_INT:
5528 for (i = 0;
5529 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5530 i += value_bit)
5531 *vp++ = INTVAL (el) >> i;
5532 /* CONST_INTs are always logically sign-extended. */
5533 for (; i < elem_bitsize; i += value_bit)
5534 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5535 break;
5537 case CONST_WIDE_INT:
5539 rtx_mode_t val = std::make_pair (el, innermode);
5540 unsigned char extend = wi::sign_mask (val);
5542 for (i = 0; i < elem_bitsize; i += value_bit)
5543 *vp++ = wi::extract_uhwi (val, i, value_bit);
5544 for (; i < elem_bitsize; i += value_bit)
5545 *vp++ = extend;
5547 break;
5549 case CONST_DOUBLE:
5550 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5552 unsigned char extend = 0;
5553 /* If this triggers, someone should have generated a
5554 CONST_INT instead. */
5555 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5557 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5558 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5559 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5561 *vp++
5562 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5563 i += value_bit;
5566 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5567 extend = -1;
5568 for (; i < elem_bitsize; i += value_bit)
5569 *vp++ = extend;
5571 else
5573 /* This is big enough for anything on the platform. */
5574 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5575 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5577 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5578 gcc_assert (bitsize <= elem_bitsize);
5579 gcc_assert (bitsize % value_bit == 0);
5581 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5582 GET_MODE (el));
5584 /* real_to_target produces its result in words affected by
5585 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5586 and use WORDS_BIG_ENDIAN instead; see the documentation
5587 of SUBREG in rtl.texi. */
5588 for (i = 0; i < bitsize; i += value_bit)
5590 int ibase;
5591 if (WORDS_BIG_ENDIAN)
5592 ibase = bitsize - 1 - i;
5593 else
5594 ibase = i;
5595 *vp++ = tmp[ibase / 32] >> i % 32;
5598 /* It shouldn't matter what's done here, so fill it with
5599 zero. */
5600 for (; i < elem_bitsize; i += value_bit)
5601 *vp++ = 0;
5603 break;
5605 case CONST_FIXED:
5606 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5608 for (i = 0; i < elem_bitsize; i += value_bit)
5609 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5611 else
5613 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5614 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5615 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5616 i += value_bit)
5617 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5618 >> (i - HOST_BITS_PER_WIDE_INT);
5619 for (; i < elem_bitsize; i += value_bit)
5620 *vp++ = 0;
5622 break;
5624 default:
5625 gcc_unreachable ();
5629 /* Now, pick the right byte to start with. */
5630 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5631 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5632 will already have offset 0. */
5633 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5635 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5636 - byte);
5637 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5638 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5639 byte = (subword_byte % UNITS_PER_WORD
5640 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5643 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5644 so if it's become negative it will instead be very large.) */
5645 gcc_assert (byte < GET_MODE_SIZE (innermode));
5647 /* Convert from bytes to chunks of size value_bit. */
5648 value_start = byte * (BITS_PER_UNIT / value_bit);
5650 /* Re-pack the value. */
5651 num_elem = GET_MODE_NUNITS (outermode);
5653 if (VECTOR_MODE_P (outermode))
5655 result_v = rtvec_alloc (num_elem);
5656 elems = &RTVEC_ELT (result_v, 0);
5658 else
5659 elems = &result_s;
5661 outer_submode = GET_MODE_INNER (outermode);
5662 outer_class = GET_MODE_CLASS (outer_submode);
5663 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5665 gcc_assert (elem_bitsize % value_bit == 0);
5666 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5668 for (elem = 0; elem < num_elem; elem++)
5670 unsigned char *vp;
5672 /* Vectors are stored in target memory order. (This is probably
5673 a mistake.) */
5675 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5676 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5677 / BITS_PER_UNIT);
5678 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5679 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5680 unsigned bytele = (subword_byte % UNITS_PER_WORD
5681 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5682 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5685 switch (outer_class)
5687 case MODE_INT:
5688 case MODE_PARTIAL_INT:
5690 int u;
5691 int base = 0;
5692 int units
5693 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5694 / HOST_BITS_PER_WIDE_INT;
5695 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5696 wide_int r;
5698 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5699 return NULL_RTX;
5700 for (u = 0; u < units; u++)
5702 unsigned HOST_WIDE_INT buf = 0;
5703 for (i = 0;
5704 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5705 i += value_bit)
5706 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5708 tmp[u] = buf;
5709 base += HOST_BITS_PER_WIDE_INT;
5711 r = wide_int::from_array (tmp, units,
5712 GET_MODE_PRECISION (outer_submode));
5713 #if TARGET_SUPPORTS_WIDE_INT == 0
5714 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5715 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5716 return NULL_RTX;
5717 #endif
5718 elems[elem] = immed_wide_int_const (r, outer_submode);
5720 break;
5722 case MODE_FLOAT:
5723 case MODE_DECIMAL_FLOAT:
5725 REAL_VALUE_TYPE r;
5726 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5728 /* real_from_target wants its input in words affected by
5729 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5730 and use WORDS_BIG_ENDIAN instead; see the documentation
5731 of SUBREG in rtl.texi. */
5732 for (i = 0; i < max_bitsize / 32; i++)
5733 tmp[i] = 0;
5734 for (i = 0; i < elem_bitsize; i += value_bit)
5736 int ibase;
5737 if (WORDS_BIG_ENDIAN)
5738 ibase = elem_bitsize - 1 - i;
5739 else
5740 ibase = i;
5741 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5744 real_from_target (&r, tmp, outer_submode);
5745 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5747 break;
5749 case MODE_FRACT:
5750 case MODE_UFRACT:
5751 case MODE_ACCUM:
5752 case MODE_UACCUM:
5754 FIXED_VALUE_TYPE f;
5755 f.data.low = 0;
5756 f.data.high = 0;
5757 f.mode = outer_submode;
5759 for (i = 0;
5760 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5761 i += value_bit)
5762 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5763 for (; i < elem_bitsize; i += value_bit)
5764 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5765 << (i - HOST_BITS_PER_WIDE_INT));
5767 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5769 break;
5771 default:
5772 gcc_unreachable ();
5775 if (VECTOR_MODE_P (outermode))
5776 return gen_rtx_CONST_VECTOR (outermode, result_v);
5777 else
5778 return result_s;
5781 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5782 Return 0 if no simplifications are possible. */
5784 simplify_subreg (machine_mode outermode, rtx op,
5785 machine_mode innermode, unsigned int byte)
5787 /* Little bit of sanity checking. */
5788 gcc_assert (innermode != VOIDmode);
5789 gcc_assert (outermode != VOIDmode);
5790 gcc_assert (innermode != BLKmode);
5791 gcc_assert (outermode != BLKmode);
5793 gcc_assert (GET_MODE (op) == innermode
5794 || GET_MODE (op) == VOIDmode);
5796 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5797 return NULL_RTX;
5799 if (byte >= GET_MODE_SIZE (innermode))
5800 return NULL_RTX;
5802 if (outermode == innermode && !byte)
5803 return op;
5805 if (CONST_SCALAR_INT_P (op)
5806 || CONST_DOUBLE_AS_FLOAT_P (op)
5807 || GET_CODE (op) == CONST_FIXED
5808 || GET_CODE (op) == CONST_VECTOR)
5809 return simplify_immed_subreg (outermode, op, innermode, byte);
5811 /* Changing mode twice with SUBREG => just change it once,
5812 or not at all if changing back to the starting mode. */
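/* For instance, on a little-endian target (an illustrative case only):
   (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
   (subreg:QI (reg:SI r) 0), while (subreg:SI (subreg:HI (reg:SI r) 0) 0)
   simplifies all the way back to (reg:SI r), as handled below.  */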
5813 if (GET_CODE (op) == SUBREG)
5815 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5816 int final_offset = byte + SUBREG_BYTE (op);
5817 rtx newx;
5819 if (outermode == innermostmode
5820 && byte == 0 && SUBREG_BYTE (op) == 0)
5821 return SUBREG_REG (op);
5823 /* The SUBREG_BYTE represents the offset, as if the value were stored
5824 in memory. The irritating exception is a paradoxical subreg, where
5825 we define SUBREG_BYTE to be 0. On big endian machines, this
5826 value should be negative. For a moment, undo this exception. */
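/* Worked illustration (hypothetical fully big-endian target with
   UNITS_PER_WORD == 4): with OUTERMODE SImode, INNERMODE HImode and
   BYTE 0, difference is 2 - 4 = -2, so FINAL_OFFSET is reduced by 2,
   recovering the negative offset described above.  */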
5827 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5829 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5830 if (WORDS_BIG_ENDIAN)
5831 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5832 if (BYTES_BIG_ENDIAN)
5833 final_offset += difference % UNITS_PER_WORD;
5835 if (SUBREG_BYTE (op) == 0
5836 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5838 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5839 if (WORDS_BIG_ENDIAN)
5840 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5841 if (BYTES_BIG_ENDIAN)
5842 final_offset += difference % UNITS_PER_WORD;
5845 /* See whether the resulting subreg will be paradoxical. */
5846 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5848 /* In nonparadoxical subregs we can't handle negative offsets. */
5849 if (final_offset < 0)
5850 return NULL_RTX;
5851 /* Bail out in case the resulting subreg would be incorrect. */
5852 if (final_offset % GET_MODE_SIZE (outermode)
5853 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5854 return NULL_RTX;
5856 else
5858 int offset = 0;
5859 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5861 /* In a paradoxical subreg, see if we are still looking at the lower part.
5862 If so, our SUBREG_BYTE will be 0. */
5863 if (WORDS_BIG_ENDIAN)
5864 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5865 if (BYTES_BIG_ENDIAN)
5866 offset += difference % UNITS_PER_WORD;
5867 if (offset == final_offset)
5868 final_offset = 0;
5869 else
5870 return NULL_RTX;
5873 /* Recurse for further possible simplifications. */
5874 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5875 final_offset);
5876 if (newx)
5877 return newx;
5878 if (validate_subreg (outermode, innermostmode,
5879 SUBREG_REG (op), final_offset))
5881 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5882 if (SUBREG_PROMOTED_VAR_P (op)
5883 && SUBREG_PROMOTED_SIGN (op) >= 0
5884 && GET_MODE_CLASS (outermode) == MODE_INT
5885 && IN_RANGE (GET_MODE_SIZE (outermode),
5886 GET_MODE_SIZE (innermode),
5887 GET_MODE_SIZE (innermostmode))
5888 && subreg_lowpart_p (newx))
5890 SUBREG_PROMOTED_VAR_P (newx) = 1;
5891 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5893 return newx;
5895 return NULL_RTX;
5898 /* SUBREG of a hard register => just change the register number
5899 and/or mode. If the hard register is not valid in that mode,
5900 suppress this simplification. If the hard register is the stack,
5901 frame, or argument pointer, leave this as a SUBREG. */
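/* For example, on a hypothetical little-endian 32-bit target where
   (reg:DI 0) occupies hard registers 0 and 1, (subreg:SI (reg:DI 0) 4)
   can become (reg:SI 1), provided simplify_subreg_regno accepts the
   combination of registers and modes.  */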
5903 if (REG_P (op) && HARD_REGISTER_P (op))
5905 unsigned int regno, final_regno;
5907 regno = REGNO (op);
5908 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5909 if (HARD_REGISTER_NUM_P (final_regno))
5911 rtx x;
5912 int final_offset = byte;
5914 /* Adjust offset for paradoxical subregs. */
5915 if (byte == 0
5916 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5918 int difference = (GET_MODE_SIZE (innermode)
5919 - GET_MODE_SIZE (outermode));
5920 if (WORDS_BIG_ENDIAN)
5921 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5922 if (BYTES_BIG_ENDIAN)
5923 final_offset += difference % UNITS_PER_WORD;
5926 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5928 /* Propagate the original regno. We don't have any way to specify
5929 the offset inside the original regno, so do so only for the lowpart.
5930 The information is used only by alias analysis, which cannot
5931 grok partial registers anyway. */
5933 if (subreg_lowpart_offset (outermode, innermode) == byte)
5934 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5935 return x;
5939 /* If we have a SUBREG of a register that we are replacing and we are
5940 replacing it with a MEM, make a new MEM and try replacing the
5941 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5942 or if we would be widening it. */
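/* As an illustration (assuming the address is not mode-dependent):
   (subreg:HI (mem:SI A) 2) can be rewritten by adjust_address_nv as a
   (mem:HI ...) whose address is A offset by 2 bytes, instead of being
   kept as a SUBREG of the wider memory reference.  */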
5944 if (MEM_P (op)
5945 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5946 /* Allow splitting of volatile memory references in case we don't
5947 have an instruction to move the whole thing. */
5948 && (! MEM_VOLATILE_P (op)
5949 || ! have_insn_for (SET, innermode))
5950 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5951 return adjust_address_nv (op, outermode, byte);
5953 /* Handle complex values represented as CONCAT
5954 of a real and an imaginary part. */
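/* For instance, with OP = (concat:SC re:SF im:SF) the part size is 4
   bytes, so BYTE 0 selects the real part and BYTE 4 the imaginary
   part; (subreg:SF (concat:SC re im) 4) thus simplifies to im.  */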
5955 if (GET_CODE (op) == CONCAT)
5957 unsigned int part_size, final_offset;
5958 rtx part, res;
5960 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5961 if (byte < part_size)
5963 part = XEXP (op, 0);
5964 final_offset = byte;
5966 else
5968 part = XEXP (op, 1);
5969 final_offset = byte - part_size;
5972 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5973 return NULL_RTX;
5975 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5976 if (res)
5977 return res;
5978 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5979 return gen_rtx_SUBREG (outermode, part, final_offset);
5980 return NULL_RTX;
5983 /* A SUBREG resulting from a zero extension may fold to zero if
5984 it extracts higher bits than the ZERO_EXTEND's source bits. */
5985 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5987 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5988 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5989 return CONST0_RTX (outermode);
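/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads bits 32-63 of the
   zero-extended value, all of which are zero, so it folds to
   (const_int 0).  */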
5992 if (SCALAR_INT_MODE_P (outermode)
5993 && SCALAR_INT_MODE_P (innermode)
5994 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5995 && byte == subreg_lowpart_offset (outermode, innermode))
5997 rtx tem = simplify_truncation (outermode, op, innermode);
5998 if (tem)
5999 return tem;
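/* E.g. (subreg:SI (sign_extend:DI (reg:SI x)) 0) is the low part on a
   little-endian target, and simplify_truncation can then reduce the
   whole expression to (reg:SI x).  */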
6002 return NULL_RTX;
6005 /* Make a SUBREG operation or equivalent if it folds. */
6008 simplify_gen_subreg (machine_mode outermode, rtx op,
6009 machine_mode innermode, unsigned int byte)
6011 rtx newx;
6013 newx = simplify_subreg (outermode, op, innermode, byte);
6014 if (newx)
6015 return newx;
6017 if (GET_CODE (op) == SUBREG
6018 || GET_CODE (op) == CONCAT
6019 || GET_MODE (op) == VOIDmode)
6020 return NULL_RTX;
6022 if (validate_subreg (outermode, innermode, op, byte))
6023 return gen_rtx_SUBREG (outermode, op, byte);
6025 return NULL_RTX;
6028 /* Generate a subreg to get the least significant part of EXPR (in mode
6029 INNER_MODE) to OUTER_MODE. */
6032 lowpart_subreg (machine_mode outer_mode, rtx expr,
6033 machine_mode inner_mode)
6035 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6036 subreg_lowpart_offset (outer_mode, inner_mode));
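/* As a usage sketch: lowpart_subreg (SImode, x, DImode) asks for the
   least significant word of a DImode value, which subreg_lowpart_offset
   places at byte offset 0 on a little-endian target and at byte
   offset 4 on a big-endian one.  */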
6039 /* Simplify X, an rtx expression.
6041 Return the simplified expression or NULL if no simplifications
6042 were possible.
6044 This is the preferred entry point into the simplification routines;
6045 however, we still allow passes to call the more specific routines.
6047 Right now GCC has three (yes, three) major bodies of RTL simplification
6048 code that need to be unified.
6050 1. fold_rtx in cse.c. This code uses various CSE specific
6051 information to aid in RTL simplification.
6053 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6054 it uses combine specific information to aid in RTL
6055 simplification.
6057 3. The routines in this file.
6060 Long term we want to have only one body of simplification code; to
6061 get to that state I recommend the following steps:
6063 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6064 which do not depend on pass-specific state into these routines.
6066 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6067 use this routine whenever possible.
6069 3. Allow for pass dependent state to be provided to these
6070 routines and add simplifications based on the pass dependent
6071 state. Remove code from cse.c & combine.c that becomes
6072 redundant/dead.
6074 It will take time, but ultimately the compiler will be easier to
6075 maintain and improve. It's totally silly that when we add a
6076 simplification it needs to be added to 4 places (3 for RTL
6077 simplification and 1 for tree simplification). */
6080 simplify_rtx (const_rtx x)
6082 const enum rtx_code code = GET_CODE (x);
6083 const machine_mode mode = GET_MODE (x);
6085 switch (GET_RTX_CLASS (code))
6087 case RTX_UNARY:
6088 return simplify_unary_operation (code, mode,
6089 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6090 case RTX_COMM_ARITH:
6091 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6092 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6094 /* Fall through.... */
6096 case RTX_BIN_ARITH:
6097 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6099 case RTX_TERNARY:
6100 case RTX_BITFIELD_OPS:
6101 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6102 XEXP (x, 0), XEXP (x, 1),
6103 XEXP (x, 2));
6105 case RTX_COMPARE:
6106 case RTX_COMM_COMPARE:
6107 return simplify_relational_operation (code, mode,
6108 ((GET_MODE (XEXP (x, 0))
6109 != VOIDmode)
6110 ? GET_MODE (XEXP (x, 0))
6111 : GET_MODE (XEXP (x, 1))),
6112 XEXP (x, 0),
6113 XEXP (x, 1));
6115 case RTX_EXTRA:
6116 if (code == SUBREG)
6117 return simplify_subreg (mode, SUBREG_REG (x),
6118 GET_MODE (SUBREG_REG (x)),
6119 SUBREG_BYTE (x));
6120 break;
6122 case RTX_OBJ:
6123 if (code == LO_SUM)
6125 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6126 if (GET_CODE (XEXP (x, 0)) == HIGH
6127 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6128 return XEXP (x, 1);
6130 break;
6132 default:
6133 break;
6135 return NULL;