[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "alias.h"
27 #include "symtab.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "insn-codes.h"
39 #include "optabs.h"
40 #include "expmed.h"
41 #include "dojump.h"
42 #include "explow.h"
43 #include "calls.h"
44 #include "emit-rtl.h"
45 #include "stmt.h"
46 #include "expr.h"
47 #include "diagnostic-core.h"
48 #include "target.h"
49 #include "predict.h"
51 /* Simplification and canonicalization of RTL. */
53 /* Much code operates on (low, high) pairs; the low value is an
54 unsigned wide int, the high value a signed wide int. We
55 occasionally need to sign extend from low to high as if low were a
56 signed wide int. */
57 #define HWI_SIGN_EXTEND(low) \
58 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
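/* Illustrative sketch (editorial, not from the original file): HWI_SIGN_EXTEND
   simply materializes the high half of the sign extension of LOW.  Assuming a
   64-bit HOST_WIDE_INT:

     HOST_WIDE_INT hi0 = HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) 7);
     HOST_WIDE_INT hi1 = HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -7);

   hi0 is 0 and hi1 is -1, because LOW is first cast to a signed
   HOST_WIDE_INT and compared against zero.  */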
60 static rtx neg_const_int (machine_mode, const_rtx);
61 static bool plus_minus_operand_p (const_rtx);
62 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
63 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
64 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
65 unsigned int);
66 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
67 rtx, rtx);
68 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
69 machine_mode, rtx, rtx);
70 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
71 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
72 rtx, rtx, rtx, rtx);
74 /* Negate a CONST_INT rtx, truncating (because a conversion from a
75 maximally negative number can overflow). */
76 static rtx
77 neg_const_int (machine_mode mode, const_rtx i)
79 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
82 /* Test whether expression, X, is an immediate constant that represents
83 the most significant bit of machine mode MODE. */
85 bool
86 mode_signbit_p (machine_mode mode, const_rtx x)
88 unsigned HOST_WIDE_INT val;
89 unsigned int width;
91 if (GET_MODE_CLASS (mode) != MODE_INT)
92 return false;
94 width = GET_MODE_PRECISION (mode);
95 if (width == 0)
96 return false;
98 if (width <= HOST_BITS_PER_WIDE_INT
99 && CONST_INT_P (x))
100 val = INTVAL (x);
101 #if TARGET_SUPPORTS_WIDE_INT
102 else if (CONST_WIDE_INT_P (x))
104 unsigned int i;
105 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
106 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
107 return false;
108 for (i = 0; i < elts - 1; i++)
109 if (CONST_WIDE_INT_ELT (x, i) != 0)
110 return false;
111 val = CONST_WIDE_INT_ELT (x, elts - 1);
112 width %= HOST_BITS_PER_WIDE_INT;
113 if (width == 0)
114 width = HOST_BITS_PER_WIDE_INT;
116 #else
117 else if (width <= HOST_BITS_PER_DOUBLE_INT
118 && CONST_DOUBLE_AS_INT_P (x)
119 && CONST_DOUBLE_LOW (x) == 0)
121 val = CONST_DOUBLE_HIGH (x);
122 width -= HOST_BITS_PER_WIDE_INT;
124 #endif
125 else
126 /* X is not an integer constant. */
127 return false;
129 if (width < HOST_BITS_PER_WIDE_INT)
130 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
131 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
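/* Illustrative example (editorial, not from the original file): for a 32-bit
   mode such as SImode, the predicate accepts exactly the constant whose only
   set bit is bit 31, so

     mode_signbit_p (SImode, GEN_INT (0x80000000))   is true
     mode_signbit_p (SImode, GEN_INT (0x40000000))   is false

   GEN_INT is used loosely here; real callers would normally pass a constant
   already canonical for the mode, but the masking above makes the result the
   same either way.  */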
134 /* Test whether VAL is equal to the most significant bit of mode MODE
135 (after masking with the mode mask of MODE). Returns false if the
136 precision of MODE is too large to handle. */
138 bool
139 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
141 unsigned int width;
143 if (GET_MODE_CLASS (mode) != MODE_INT)
144 return false;
146 width = GET_MODE_PRECISION (mode);
147 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
148 return false;
150 val &= GET_MODE_MASK (mode);
151 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
154 /* Test whether the most significant bit of mode MODE is set in VAL.
155 Returns false if the precision of MODE is too large to handle. */
156 bool
157 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
159 unsigned int width;
161 if (GET_MODE_CLASS (mode) != MODE_INT)
162 return false;
164 width = GET_MODE_PRECISION (mode);
165 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166 return false;
168 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
169 return val != 0;
172 /* Test whether the most significant bit of mode MODE is clear in VAL.
173 Returns false if the precision of MODE is too large to handle. */
174 bool
175 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
177 unsigned int width;
179 if (GET_MODE_CLASS (mode) != MODE_INT)
180 return false;
182 width = GET_MODE_PRECISION (mode);
183 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
184 return false;
186 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
187 return val == 0;
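/* Illustrative example (editorial, not from the original file), again for
   SImode where the sign bit is bit 31:

     val_signbit_p (SImode, 0x80000000)              is true
     val_signbit_known_set_p (SImode, 0x80000001)    is true   (bit 31 set)
     val_signbit_known_clear_p (SImode, 0x7fffffff)  is true   (bit 31 clear)

   All three helpers return false for modes wider than HOST_BITS_PER_WIDE_INT,
   as the early precision checks above show.  */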
190 /* Make a binary operation by properly ordering the operands and
191 seeing if the expression folds. */
193 rtx
194 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
195 rtx op1)
197 rtx tem;
199 /* If this simplifies, do it. */
200 tem = simplify_binary_operation (code, mode, op0, op1);
201 if (tem)
202 return tem;
204 /* Put complex operands first and constants second if commutative. */
205 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
206 && swap_commutative_operands_p (op0, op1))
207 std::swap (op0, op1);
209 return gen_rtx_fmt_ee (code, mode, op0, op1);
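/* Illustrative usage sketch (editorial, not from the original file).  Callers
   that want "fold if possible, otherwise build the rtx" go through this
   wrapper instead of gen_rtx_PLUS and friends.  With REG a hypothetical
   SImode pseudo register:

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   simplify_binary_operation is expected to fold (plus X 0) back to X, so SUM
   is just REG; with two constant operands the result would instead be a
   folded CONST_INT rather than a PLUS rtx.  */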
212 /* If X is a MEM referencing the constant pool, return the real value.
213 Otherwise return X. */
214 rtx
215 avoid_constant_pool_reference (rtx x)
217 rtx c, tmp, addr;
218 machine_mode cmode;
219 HOST_WIDE_INT offset = 0;
221 switch (GET_CODE (x))
223 case MEM:
224 break;
226 case FLOAT_EXTEND:
227 /* Handle float extensions of constant pool references. */
228 tmp = XEXP (x, 0);
229 c = avoid_constant_pool_reference (tmp);
230 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
232 REAL_VALUE_TYPE d;
234 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
235 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
237 return x;
239 default:
240 return x;
243 if (GET_MODE (x) == BLKmode)
244 return x;
246 addr = XEXP (x, 0);
248 /* Call target hook to avoid the effects of -fpic etc.... */
249 addr = targetm.delegitimize_address (addr);
251 /* Split the address into a base and integer offset. */
252 if (GET_CODE (addr) == CONST
253 && GET_CODE (XEXP (addr, 0)) == PLUS
254 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
256 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
257 addr = XEXP (XEXP (addr, 0), 0);
260 if (GET_CODE (addr) == LO_SUM)
261 addr = XEXP (addr, 1);
263 /* If this is a constant pool reference, we can turn it into its
264 constant and hope that simplifications happen. */
265 if (GET_CODE (addr) == SYMBOL_REF
266 && CONSTANT_POOL_ADDRESS_P (addr))
268 c = get_pool_constant (addr);
269 cmode = get_pool_mode (addr);
271 /* If we're accessing the constant in a different mode than it was
272 originally stored, attempt to fix that up via subreg simplifications.
273 If that fails we have no choice but to return the original memory. */
274 if ((offset != 0 || cmode != GET_MODE (x))
275 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
277 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
278 if (tem && CONSTANT_P (tem))
279 return tem;
281 else
282 return c;
285 return x;
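/* Illustrative example (editorial, not from the original file): given a
   constant-pool reference such as

     (mem/u/c:SF (symbol_ref/u:SI ("*.LC0")))

   where the pool entry .LC0 holds the value 1.5, this function returns the
   pooled CONST_DOUBLE for 1.5 directly, so later folding can treat the
   operand as a constant; the label name is only an example.  Anything that
   is not such a MEM (or a FLOAT_EXTEND of one) is returned unchanged.  */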
288 /* Simplify a MEM based on its attributes. This is the default
289 delegitimize_address target hook, and it's recommended that every
290 overrider call it. */
292 rtx
293 delegitimize_mem_from_attrs (rtx x)
295 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
296 use their base addresses as equivalent. */
297 if (MEM_P (x)
298 && MEM_EXPR (x)
299 && MEM_OFFSET_KNOWN_P (x))
301 tree decl = MEM_EXPR (x);
302 machine_mode mode = GET_MODE (x);
303 HOST_WIDE_INT offset = 0;
305 switch (TREE_CODE (decl))
307 default:
308 decl = NULL;
309 break;
311 case VAR_DECL:
312 break;
314 case ARRAY_REF:
315 case ARRAY_RANGE_REF:
316 case COMPONENT_REF:
317 case BIT_FIELD_REF:
318 case REALPART_EXPR:
319 case IMAGPART_EXPR:
320 case VIEW_CONVERT_EXPR:
322 HOST_WIDE_INT bitsize, bitpos;
323 tree toffset;
324 int unsignedp, volatilep = 0;
326 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
327 &mode, &unsignedp, &volatilep, false);
328 if (bitsize != GET_MODE_BITSIZE (mode)
329 || (bitpos % BITS_PER_UNIT)
330 || (toffset && !tree_fits_shwi_p (toffset)))
331 decl = NULL;
332 else
334 offset += bitpos / BITS_PER_UNIT;
335 if (toffset)
336 offset += tree_to_shwi (toffset);
338 break;
342 if (decl
343 && mode == GET_MODE (x)
344 && TREE_CODE (decl) == VAR_DECL
345 && (TREE_STATIC (decl)
346 || DECL_THREAD_LOCAL_P (decl))
347 && DECL_RTL_SET_P (decl)
348 && MEM_P (DECL_RTL (decl)))
350 rtx newx;
352 offset += MEM_OFFSET (x);
354 newx = DECL_RTL (decl);
356 if (MEM_P (newx))
358 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
360 /* Avoid creating a new MEM needlessly if we already had
361 the same address. We do so if there's no OFFSET and the
362 old address X is identical to NEWX, or if X is of the
363 form (plus NEWX OFFSET), or the NEWX is of the form
364 (plus Y (const_int Z)) and X is that with the offset
365 added: (plus Y (const_int Z+OFFSET)). */
366 if (!((offset == 0
367 || (GET_CODE (o) == PLUS
368 && GET_CODE (XEXP (o, 1)) == CONST_INT
369 && (offset == INTVAL (XEXP (o, 1))
370 || (GET_CODE (n) == PLUS
371 && GET_CODE (XEXP (n, 1)) == CONST_INT
372 && (INTVAL (XEXP (n, 1)) + offset
373 == INTVAL (XEXP (o, 1)))
374 && (n = XEXP (n, 0))))
375 && (o = XEXP (o, 0))))
376 && rtx_equal_p (o, n)))
377 x = adjust_address_nv (newx, mode, offset);
379 else if (GET_MODE (x) == GET_MODE (newx)
380 && offset == 0)
381 x = newx;
385 return x;
388 /* Make a unary operation by first seeing if it folds and otherwise making
389 the specified operation. */
391 rtx
392 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
393 machine_mode op_mode)
395 rtx tem;
397 /* If this simplifies, use it. */
398 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
399 return tem;
401 return gen_rtx_fmt_e (code, mode, op);
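/* Illustrative usage sketch (editorial, not from the original file):

     rtx t = simplify_gen_unary (NEG, SImode, const1_rtx, SImode);

   simplify_unary_operation folds the negation of the constant operand, so T
   is expected to be (const_int -1) rather than a NEG rtx.  */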
404 /* Likewise for ternary operations. */
406 rtx
407 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
408 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
410 rtx tem;
412 /* If this simplifies, use it. */
413 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
414 op0, op1, op2)))
415 return tem;
417 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
420 /* Likewise, for relational operations.
421 CMP_MODE specifies the mode in which the comparison is done. */
423 rtx
424 simplify_gen_relational (enum rtx_code code, machine_mode mode,
425 machine_mode cmp_mode, rtx op0, rtx op1)
427 rtx tem;
429 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
430 op0, op1)))
431 return tem;
433 return gen_rtx_fmt_ee (code, mode, op0, op1);
436 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
437 and simplify the result. If FN is non-NULL, call this callback on each
438 X; if it returns non-NULL, replace X with its return value and simplify the
439 result. */
441 rtx
442 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
443 rtx (*fn) (rtx, const_rtx, void *), void *data)
445 enum rtx_code code = GET_CODE (x);
446 machine_mode mode = GET_MODE (x);
447 machine_mode op_mode;
448 const char *fmt;
449 rtx op0, op1, op2, newx, op;
450 rtvec vec, newvec;
451 int i, j;
453 if (__builtin_expect (fn != NULL, 0))
455 newx = fn (x, old_rtx, data);
456 if (newx)
457 return newx;
459 else if (rtx_equal_p (x, old_rtx))
460 return copy_rtx ((rtx) data);
462 switch (GET_RTX_CLASS (code))
464 case RTX_UNARY:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0))
469 return x;
470 return simplify_gen_unary (code, mode, op0, op_mode);
472 case RTX_BIN_ARITH:
473 case RTX_COMM_ARITH:
474 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
475 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
476 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
477 return x;
478 return simplify_gen_binary (code, mode, op0, op1);
480 case RTX_COMPARE:
481 case RTX_COMM_COMPARE:
482 op0 = XEXP (x, 0);
483 op1 = XEXP (x, 1);
484 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
487 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
488 return x;
489 return simplify_gen_relational (code, mode, op_mode, op0, op1);
491 case RTX_TERNARY:
492 case RTX_BITFIELD_OPS:
493 op0 = XEXP (x, 0);
494 op_mode = GET_MODE (op0);
495 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
496 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
497 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
498 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
499 return x;
500 if (op_mode == VOIDmode)
501 op_mode = GET_MODE (op0);
502 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
504 case RTX_EXTRA:
505 if (code == SUBREG)
507 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
508 if (op0 == SUBREG_REG (x))
509 return x;
510 op0 = simplify_gen_subreg (GET_MODE (x), op0,
511 GET_MODE (SUBREG_REG (x)),
512 SUBREG_BYTE (x));
513 return op0 ? op0 : x;
515 break;
517 case RTX_OBJ:
518 if (code == MEM)
520 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
521 if (op0 == XEXP (x, 0))
522 return x;
523 return replace_equiv_address_nv (x, op0);
525 else if (code == LO_SUM)
527 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
528 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
530 /* (lo_sum (high x) y) -> y where x and y have the same base. */
531 if (GET_CODE (op0) == HIGH)
533 rtx base0, base1, offset0, offset1;
534 split_const (XEXP (op0, 0), &base0, &offset0);
535 split_const (op1, &base1, &offset1);
536 if (rtx_equal_p (base0, base1))
537 return op1;
540 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
541 return x;
542 return gen_rtx_LO_SUM (mode, op0, op1);
544 break;
546 default:
547 break;
550 newx = x;
551 fmt = GET_RTX_FORMAT (code);
552 for (i = 0; fmt[i]; i++)
553 switch (fmt[i])
555 case 'E':
556 vec = XVEC (x, i);
557 newvec = XVEC (newx, i);
558 for (j = 0; j < GET_NUM_ELEM (vec); j++)
560 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
561 old_rtx, fn, data);
562 if (op != RTVEC_ELT (vec, j))
564 if (newvec == vec)
566 newvec = shallow_copy_rtvec (vec);
567 if (x == newx)
568 newx = shallow_copy_rtx (x);
569 XVEC (newx, i) = newvec;
571 RTVEC_ELT (newvec, j) = op;
574 break;
576 case 'e':
577 if (XEXP (x, i))
579 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
580 if (op != XEXP (x, i))
582 if (x == newx)
583 newx = shallow_copy_rtx (x);
584 XEXP (newx, i) = op;
587 break;
589 return newx;
592 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
593 resulting RTX. Return a new RTX which is as simplified as possible. */
595 rtx
596 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
598 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
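/* Illustrative usage sketch (editorial, not from the original file):
   substituting a constant for a register and re-simplifying in one step.
   With REG a hypothetical SImode pseudo:

     rtx expr = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx res  = simplify_replace_rtx (expr, reg, GEN_INT (2));

   The walk substitutes (const_int 2) for REG and rebuilds the PLUS through
   simplify_gen_binary, so RES is expected to be (const_int 3).  */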
601 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
602 Only handle cases where the truncated value is inherently an rvalue.
604 RTL provides two ways of truncating a value:
606 1. a lowpart subreg. This form is only a truncation when both
607 the outer and inner modes (here MODE and OP_MODE respectively)
608 are scalar integers, and only then when the subreg is used as
609 an rvalue.
611 It is only valid to form such truncating subregs if the
612 truncation requires no action by the target. The onus for
613 proving this is on the creator of the subreg -- e.g. the
614 caller to simplify_subreg or simplify_gen_subreg -- and typically
615 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
617 2. a TRUNCATE. This form handles both scalar and compound integers.
619 The first form is preferred where valid. However, the TRUNCATE
620 handling in simplify_unary_operation turns the second form into the
621 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
622 so it is generally safe to form rvalue truncations using:
624 simplify_gen_unary (TRUNCATE, ...)
626 and leave simplify_unary_operation to work out which representation
627 should be used.
629 Because of the proof requirements on (1), simplify_truncation must
630 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
631 regardless of whether the outer truncation came from a SUBREG or a
632 TRUNCATE. For example, if the caller has proven that an SImode
633 truncation of:
635 (and:DI X Y)
637 is a no-op and can be represented as a subreg, it does not follow
638 that SImode truncations of X and Y are also no-ops. On a target
639 like 64-bit MIPS that requires SImode values to be stored in
640 sign-extended form, an SImode truncation of:
642 (and:DI (reg:DI X) (const_int 63))
644 is trivially a no-op because only the lower 6 bits can be set.
645 However, X is still an arbitrary 64-bit number and so we cannot
646 assume that truncating it too is a no-op. */
648 static rtx
649 simplify_truncation (machine_mode mode, rtx op,
650 machine_mode op_mode)
652 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
653 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
654 gcc_assert (precision <= op_precision);
656 /* Optimize truncations of zero and sign extended values. */
657 if (GET_CODE (op) == ZERO_EXTEND
658 || GET_CODE (op) == SIGN_EXTEND)
660 /* There are three possibilities. If MODE is the same as the
661 origmode, we can omit both the extension and the subreg.
662 If MODE is not larger than the origmode, we can apply the
663 truncation without the extension. Finally, if the outermode
664 is larger than the origmode, we can just extend to the appropriate
665 mode. */
666 machine_mode origmode = GET_MODE (XEXP (op, 0));
667 if (mode == origmode)
668 return XEXP (op, 0);
669 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
670 return simplify_gen_unary (TRUNCATE, mode,
671 XEXP (op, 0), origmode);
672 else
673 return simplify_gen_unary (GET_CODE (op), mode,
674 XEXP (op, 0), origmode);
677 /* If the machine can perform operations in the truncated mode, distribute
678 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
679 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
680 if (1
681 #ifdef WORD_REGISTER_OPERATIONS
682 && precision >= BITS_PER_WORD
683 #endif
684 && (GET_CODE (op) == PLUS
685 || GET_CODE (op) == MINUS
686 || GET_CODE (op) == MULT))
688 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
689 if (op0)
691 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
692 if (op1)
693 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
697 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
698 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
699 the outer subreg is effectively a truncation to the original mode. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 /* Ensure that OP_MODE is at least twice as wide as MODE
703 to avoid the possibility that an outer LSHIFTRT shifts by more
704 than the sign extension's sign_bit_copies and introduces zeros
705 into the high bits of the result. */
706 && 2 * precision <= op_precision
707 && CONST_INT_P (XEXP (op, 1))
708 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
709 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
710 && UINTVAL (XEXP (op, 1)) < precision)
711 return simplify_gen_binary (ASHIFTRT, mode,
712 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
714 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
715 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
716 the outer subreg is effectively a truncation to the original mode. */
717 if ((GET_CODE (op) == LSHIFTRT
718 || GET_CODE (op) == ASHIFTRT)
719 && CONST_INT_P (XEXP (op, 1))
720 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722 && UINTVAL (XEXP (op, 1)) < precision)
723 return simplify_gen_binary (LSHIFTRT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
726 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
727 (ashift:QI (x:QI) C), where C is a suitable small constant and
728 the outer subreg is effectively a truncation to the original mode. */
729 if (GET_CODE (op) == ASHIFT
730 && CONST_INT_P (XEXP (op, 1))
731 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
732 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
733 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
734 && UINTVAL (XEXP (op, 1)) < precision)
735 return simplify_gen_binary (ASHIFT, mode,
736 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
738 /* Recognize a word extraction from a multi-word subreg. */
739 if ((GET_CODE (op) == LSHIFTRT
740 || GET_CODE (op) == ASHIFTRT)
741 && SCALAR_INT_MODE_P (mode)
742 && SCALAR_INT_MODE_P (op_mode)
743 && precision >= BITS_PER_WORD
744 && 2 * precision <= op_precision
745 && CONST_INT_P (XEXP (op, 1))
746 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
747 && UINTVAL (XEXP (op, 1)) < op_precision)
749 int byte = subreg_lowpart_offset (mode, op_mode);
750 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
751 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
752 (WORDS_BIG_ENDIAN
753 ? byte - shifted_bytes
754 : byte + shifted_bytes));
757 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
758 and try replacing the TRUNCATE and shift with it. Don't do this
759 if the MEM has a mode-dependent address. */
760 if ((GET_CODE (op) == LSHIFTRT
761 || GET_CODE (op) == ASHIFTRT)
762 && SCALAR_INT_MODE_P (op_mode)
763 && MEM_P (XEXP (op, 0))
764 && CONST_INT_P (XEXP (op, 1))
765 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
766 && INTVAL (XEXP (op, 1)) > 0
767 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
768 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
769 MEM_ADDR_SPACE (XEXP (op, 0)))
770 && ! MEM_VOLATILE_P (XEXP (op, 0))
771 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
772 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
774 int byte = subreg_lowpart_offset (mode, op_mode);
775 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
776 return adjust_address_nv (XEXP (op, 0), mode,
777 (WORDS_BIG_ENDIAN
778 ? byte - shifted_bytes
779 : byte + shifted_bytes));
782 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
783 (OP:SI foo:SI) if OP is NEG or ABS. */
784 if ((GET_CODE (op) == ABS
785 || GET_CODE (op) == NEG)
786 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
787 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
788 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
789 return simplify_gen_unary (GET_CODE (op), mode,
790 XEXP (XEXP (op, 0), 0), mode);
792 /* (truncate:A (subreg:B (truncate:C X) 0)) is
793 (truncate:A X). */
794 if (GET_CODE (op) == SUBREG
795 && SCALAR_INT_MODE_P (mode)
796 && SCALAR_INT_MODE_P (op_mode)
797 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
798 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
799 && subreg_lowpart_p (op))
801 rtx inner = XEXP (SUBREG_REG (op), 0);
802 if (GET_MODE_PRECISION (mode)
803 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
804 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
805 else
806 /* If subreg above is paradoxical and C is narrower
807 than A, return (subreg:A (truncate:C X) 0). */
808 return simplify_gen_subreg (mode, SUBREG_REG (op),
809 GET_MODE (SUBREG_REG (op)), 0);
812 /* (truncate:A (truncate:B X)) is (truncate:A X). */
813 if (GET_CODE (op) == TRUNCATE)
814 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
815 GET_MODE (XEXP (op, 0)));
817 return NULL_RTX;
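/* Illustrative example (editorial, not from the original file) of the
   extension case at the top of simplify_truncation: when the truncation
   returns to the mode the value started in, the two operations cancel,

     (truncate:HI (sign_extend:SI (reg:HI X)))  -->  (reg:HI X)

   while truncating to a still narrower mode, say QImode, becomes a
   (truncate:QI ...) applied directly to the unextended operand.  */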
820 /* Try to simplify a unary operation CODE whose output mode is to be
821 MODE with input operand OP whose mode was originally OP_MODE.
822 Return zero if no simplification can be made. */
823 rtx
824 simplify_unary_operation (enum rtx_code code, machine_mode mode,
825 rtx op, machine_mode op_mode)
827 rtx trueop, tem;
829 trueop = avoid_constant_pool_reference (op);
831 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
832 if (tem)
833 return tem;
835 return simplify_unary_operation_1 (code, mode, op);
838 /* Perform some simplifications we can do even if the operands
839 aren't constant. */
840 static rtx
841 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
843 enum rtx_code reversed;
844 rtx temp;
846 switch (code)
848 case NOT:
849 /* (not (not X)) == X. */
850 if (GET_CODE (op) == NOT)
851 return XEXP (op, 0);
853 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
854 comparison is all ones. */
855 if (COMPARISON_P (op)
856 && (mode == BImode || STORE_FLAG_VALUE == -1)
857 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
858 return simplify_gen_relational (reversed, mode, VOIDmode,
859 XEXP (op, 0), XEXP (op, 1));
861 /* (not (plus X -1)) can become (neg X). */
862 if (GET_CODE (op) == PLUS
863 && XEXP (op, 1) == constm1_rtx)
864 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
866 /* Similarly, (not (neg X)) is (plus X -1). */
867 if (GET_CODE (op) == NEG)
868 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
869 CONSTM1_RTX (mode));
871 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
872 if (GET_CODE (op) == XOR
873 && CONST_INT_P (XEXP (op, 1))
874 && (temp = simplify_unary_operation (NOT, mode,
875 XEXP (op, 1), mode)) != 0)
876 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
878 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
879 if (GET_CODE (op) == PLUS
880 && CONST_INT_P (XEXP (op, 1))
881 && mode_signbit_p (mode, XEXP (op, 1))
882 && (temp = simplify_unary_operation (NOT, mode,
883 XEXP (op, 1), mode)) != 0)
884 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
887 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
888 operands other than 1, but that is not valid. We could do a
889 similar simplification for (not (lshiftrt C X)) where C is
890 just the sign bit, but this doesn't seem common enough to
891 bother with. */
892 if (GET_CODE (op) == ASHIFT
893 && XEXP (op, 0) == const1_rtx)
895 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
896 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
899 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
900 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
901 so we can perform the above simplification. */
902 if (STORE_FLAG_VALUE == -1
903 && GET_CODE (op) == ASHIFTRT
904 && CONST_INT_P (XEXP (op, 1))
905 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
906 return simplify_gen_relational (GE, mode, VOIDmode,
907 XEXP (op, 0), const0_rtx);
910 if (GET_CODE (op) == SUBREG
911 && subreg_lowpart_p (op)
912 && (GET_MODE_SIZE (GET_MODE (op))
913 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
914 && GET_CODE (SUBREG_REG (op)) == ASHIFT
915 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
917 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
918 rtx x;
920 x = gen_rtx_ROTATE (inner_mode,
921 simplify_gen_unary (NOT, inner_mode, const1_rtx,
922 inner_mode),
923 XEXP (SUBREG_REG (op), 1));
924 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
925 if (temp)
926 return temp;
929 /* Apply De Morgan's laws to reduce number of patterns for machines
930 with negating logical insns (and-not, nand, etc.). If result has
931 only one NOT, put it first, since that is how the patterns are
932 coded. */
933 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
935 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
936 machine_mode op_mode;
938 op_mode = GET_MODE (in1);
939 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
941 op_mode = GET_MODE (in2);
942 if (op_mode == VOIDmode)
943 op_mode = mode;
944 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
946 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
947 std::swap (in1, in2);
949 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
950 mode, in1, in2);
953 /* (not (bswap x)) -> (bswap (not x)). */
954 if (GET_CODE (op) == BSWAP)
956 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
957 return simplify_gen_unary (BSWAP, mode, x, mode);
959 break;
961 case NEG:
962 /* (neg (neg X)) == X. */
963 if (GET_CODE (op) == NEG)
964 return XEXP (op, 0);
966 /* (neg (plus X 1)) can become (not X). */
967 if (GET_CODE (op) == PLUS
968 && XEXP (op, 1) == const1_rtx)
969 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
971 /* Similarly, (neg (not X)) is (plus X 1). */
972 if (GET_CODE (op) == NOT)
973 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
974 CONST1_RTX (mode));
976 /* (neg (minus X Y)) can become (minus Y X). This transformation
977 isn't safe for modes with signed zeros, since if X and Y are
978 both +0, (minus Y X) is the same as (minus X Y). If the
979 rounding mode is towards +infinity (or -infinity) then the two
980 expressions will be rounded differently. */
981 if (GET_CODE (op) == MINUS
982 && !HONOR_SIGNED_ZEROS (mode)
983 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
986 if (GET_CODE (op) == PLUS
987 && !HONOR_SIGNED_ZEROS (mode)
988 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
990 /* (neg (plus A C)) is simplified to (minus -C A). */
991 if (CONST_SCALAR_INT_P (XEXP (op, 1))
992 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
994 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
995 if (temp)
996 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
999 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1004 /* (neg (mult A B)) becomes (mult A (neg B)).
1005 This works even for floating-point values. */
1006 if (GET_CODE (op) == MULT
1007 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1009 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1010 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1013 /* NEG commutes with ASHIFT since it is multiplication. Only do
1014 this if we can then eliminate the NEG (e.g., if the operand
1015 is a constant). */
1016 if (GET_CODE (op) == ASHIFT)
1018 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1019 if (temp)
1020 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1023 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1024 C is equal to the width of MODE minus 1. */
1025 if (GET_CODE (op) == ASHIFTRT
1026 && CONST_INT_P (XEXP (op, 1))
1027 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1028 return simplify_gen_binary (LSHIFTRT, mode,
1029 XEXP (op, 0), XEXP (op, 1));
1031 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1032 C is equal to the width of MODE minus 1. */
1033 if (GET_CODE (op) == LSHIFTRT
1034 && CONST_INT_P (XEXP (op, 1))
1035 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1036 return simplify_gen_binary (ASHIFTRT, mode,
1037 XEXP (op, 0), XEXP (op, 1));
1039 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1040 if (GET_CODE (op) == XOR
1041 && XEXP (op, 1) == const1_rtx
1042 && nonzero_bits (XEXP (op, 0), mode) == 1)
1043 return plus_constant (mode, XEXP (op, 0), -1);
1045 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1046 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1047 if (GET_CODE (op) == LT
1048 && XEXP (op, 1) == const0_rtx
1049 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1051 machine_mode inner = GET_MODE (XEXP (op, 0));
1052 int isize = GET_MODE_PRECISION (inner);
1053 if (STORE_FLAG_VALUE == 1)
1055 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1056 GEN_INT (isize - 1));
1057 if (mode == inner)
1058 return temp;
1059 if (GET_MODE_PRECISION (mode) > isize)
1060 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1061 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1063 else if (STORE_FLAG_VALUE == -1)
1065 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1066 GEN_INT (isize - 1));
1067 if (mode == inner)
1068 return temp;
1069 if (GET_MODE_PRECISION (mode) > isize)
1070 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1071 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1074 break;
1076 case TRUNCATE:
1077 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1078 with the umulXi3_highpart patterns. */
1079 if (GET_CODE (op) == LSHIFTRT
1080 && GET_CODE (XEXP (op, 0)) == MULT)
1081 break;
1083 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1085 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1087 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1088 if (temp)
1089 return temp;
1091 /* We can't handle truncation to a partial integer mode here
1092 because we don't know the real bitsize of the partial
1093 integer mode. */
1094 break;
1097 if (GET_MODE (op) != VOIDmode)
1099 temp = simplify_truncation (mode, op, GET_MODE (op));
1100 if (temp)
1101 return temp;
1104 /* If we know that the value is already truncated, we can
1105 replace the TRUNCATE with a SUBREG. */
1106 if (GET_MODE_NUNITS (mode) == 1
1107 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1108 || truncated_to_mode (mode, op)))
1110 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1111 if (temp)
1112 return temp;
1115 /* A truncate of a comparison can be replaced with a subreg if
1116 STORE_FLAG_VALUE permits. This is like the previous test,
1117 but it works even if the comparison is done in a mode larger
1118 than HOST_BITS_PER_WIDE_INT. */
1119 if (HWI_COMPUTABLE_MODE_P (mode)
1120 && COMPARISON_P (op)
1121 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1123 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1124 if (temp)
1125 return temp;
1128 /* A truncate of a memory is just loading the low part of the memory
1129 if we are not changing the meaning of the address. */
1130 if (GET_CODE (op) == MEM
1131 && !VECTOR_MODE_P (mode)
1132 && !MEM_VOLATILE_P (op)
1133 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1135 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1136 if (temp)
1137 return temp;
1140 break;
1142 case FLOAT_TRUNCATE:
1143 if (DECIMAL_FLOAT_MODE_P (mode))
1144 break;
1146 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1147 if (GET_CODE (op) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (op, 0)) == mode)
1149 return XEXP (op, 0);
1151 /* (float_truncate:SF (float_truncate:DF foo:XF))
1152 = (float_truncate:SF foo:XF).
1153 This may eliminate double rounding, so it is unsafe.
1155 (float_truncate:SF (float_extend:XF foo:DF))
1156 = (float_truncate:SF foo:DF).
1158 (float_truncate:DF (float_extend:XF foo:SF))
1159 = (float_extend:DF foo:SF). */
1160 if ((GET_CODE (op) == FLOAT_TRUNCATE
1161 && flag_unsafe_math_optimizations)
1162 || GET_CODE (op) == FLOAT_EXTEND)
1163 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1164 0)))
1165 > GET_MODE_SIZE (mode)
1166 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1167 mode,
1168 XEXP (op, 0), mode);
1170 /* (float_truncate (float x)) is (float x) */
1171 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1172 && (flag_unsafe_math_optimizations
1173 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1174 && ((unsigned)significand_size (GET_MODE (op))
1175 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1176 - num_sign_bit_copies (XEXP (op, 0),
1177 GET_MODE (XEXP (op, 0))))))))
1178 return simplify_gen_unary (GET_CODE (op), mode,
1179 XEXP (op, 0),
1180 GET_MODE (XEXP (op, 0)));
1182 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1183 (OP:SF foo:SF) if OP is NEG or ABS. */
1184 if ((GET_CODE (op) == ABS
1185 || GET_CODE (op) == NEG)
1186 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1187 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1188 return simplify_gen_unary (GET_CODE (op), mode,
1189 XEXP (XEXP (op, 0), 0), mode);
1191 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1192 is (float_truncate:SF x). */
1193 if (GET_CODE (op) == SUBREG
1194 && subreg_lowpart_p (op)
1195 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1196 return SUBREG_REG (op);
1197 break;
1199 case FLOAT_EXTEND:
1200 if (DECIMAL_FLOAT_MODE_P (mode))
1201 break;
1203 /* (float_extend (float_extend x)) is (float_extend x)
1205 (float_extend (float x)) is (float x) assuming that double
1206 rounding can't happen.
1207 */
1208 if (GET_CODE (op) == FLOAT_EXTEND
1209 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1210 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1211 && ((unsigned)significand_size (GET_MODE (op))
1212 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1213 - num_sign_bit_copies (XEXP (op, 0),
1214 GET_MODE (XEXP (op, 0)))))))
1215 return simplify_gen_unary (GET_CODE (op), mode,
1216 XEXP (op, 0),
1217 GET_MODE (XEXP (op, 0)));
1219 break;
1221 case ABS:
1222 /* (abs (neg <foo>)) -> (abs <foo>) */
1223 if (GET_CODE (op) == NEG)
1224 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1225 GET_MODE (XEXP (op, 0)));
1227 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1228 do nothing. */
1229 if (GET_MODE (op) == VOIDmode)
1230 break;
1232 /* If operand is something known to be positive, ignore the ABS. */
1233 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1234 || val_signbit_known_clear_p (GET_MODE (op),
1235 nonzero_bits (op, GET_MODE (op))))
1236 return op;
1238 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1239 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1240 return gen_rtx_NEG (mode, op);
1242 break;
1244 case FFS:
1245 /* (ffs (*_extend <X>)) = (ffs <X>) */
1246 if (GET_CODE (op) == SIGN_EXTEND
1247 || GET_CODE (op) == ZERO_EXTEND)
1248 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1249 GET_MODE (XEXP (op, 0)));
1250 break;
1252 case POPCOUNT:
1253 switch (GET_CODE (op))
1255 case BSWAP:
1256 case ZERO_EXTEND:
1257 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1258 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1259 GET_MODE (XEXP (op, 0)));
1261 case ROTATE:
1262 case ROTATERT:
1263 /* Rotations don't affect popcount. */
1264 if (!side_effects_p (XEXP (op, 1)))
1265 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1266 GET_MODE (XEXP (op, 0)));
1267 break;
1269 default:
1270 break;
1272 break;
1274 case PARITY:
1275 switch (GET_CODE (op))
1277 case NOT:
1278 case BSWAP:
1279 case ZERO_EXTEND:
1280 case SIGN_EXTEND:
1281 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1282 GET_MODE (XEXP (op, 0)));
1284 case ROTATE:
1285 case ROTATERT:
1286 /* Rotations don't affect parity. */
1287 if (!side_effects_p (XEXP (op, 1)))
1288 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1290 break;
1292 default:
1293 break;
1295 break;
1297 case BSWAP:
1298 /* (bswap (bswap x)) -> x. */
1299 if (GET_CODE (op) == BSWAP)
1300 return XEXP (op, 0);
1301 break;
1303 case FLOAT:
1304 /* (float (sign_extend <X>)) = (float <X>). */
1305 if (GET_CODE (op) == SIGN_EXTEND)
1306 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1307 GET_MODE (XEXP (op, 0)));
1308 break;
1310 case SIGN_EXTEND:
1311 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1312 becomes just the MINUS if its mode is MODE. This allows
1313 folding switch statements on machines using casesi (such as
1314 the VAX). */
1315 if (GET_CODE (op) == TRUNCATE
1316 && GET_MODE (XEXP (op, 0)) == mode
1317 && GET_CODE (XEXP (op, 0)) == MINUS
1318 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1319 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1320 return XEXP (op, 0);
1322 /* Extending a widening multiplication should be canonicalized to
1323 a wider widening multiplication. */
1324 if (GET_CODE (op) == MULT)
1326 rtx lhs = XEXP (op, 0);
1327 rtx rhs = XEXP (op, 1);
1328 enum rtx_code lcode = GET_CODE (lhs);
1329 enum rtx_code rcode = GET_CODE (rhs);
1331 /* Widening multiplies usually extend both operands, but sometimes
1332 they use a shift to extract a portion of a register. */
1333 if ((lcode == SIGN_EXTEND
1334 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1335 && (rcode == SIGN_EXTEND
1336 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1338 machine_mode lmode = GET_MODE (lhs);
1339 machine_mode rmode = GET_MODE (rhs);
1340 int bits;
1342 if (lcode == ASHIFTRT)
1343 /* Number of bits not shifted off the end. */
1344 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1345 else /* lcode == SIGN_EXTEND */
1346 /* Size of inner mode. */
1347 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1349 if (rcode == ASHIFTRT)
1350 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1351 else /* rcode == SIGN_EXTEND */
1352 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1354 /* We can only widen multiplies if the result is mathematically
1355 equivalent. I.e. if overflow was impossible. */
1356 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1357 return simplify_gen_binary
1358 (MULT, mode,
1359 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1360 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1364 /* Check for a sign extension of a subreg of a promoted
1365 variable, where the promotion is sign-extended, and the
1366 target mode is the same as the variable's promotion. */
1367 if (GET_CODE (op) == SUBREG
1368 && SUBREG_PROMOTED_VAR_P (op)
1369 && SUBREG_PROMOTED_SIGNED_P (op)
1370 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1372 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1373 if (temp)
1374 return temp;
1377 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1378 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1379 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1381 gcc_assert (GET_MODE_PRECISION (mode)
1382 > GET_MODE_PRECISION (GET_MODE (op)));
1383 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1384 GET_MODE (XEXP (op, 0)));
1387 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1388 is (sign_extend:M (subreg:O <X>)) if there is mode with
1389 GET_MODE_BITSIZE (N) - I bits.
1390 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1391 is similarly (zero_extend:M (subreg:O <X>)). */
1392 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1393 && GET_CODE (XEXP (op, 0)) == ASHIFT
1394 && CONST_INT_P (XEXP (op, 1))
1395 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1396 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1398 machine_mode tmode
1399 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1400 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1401 gcc_assert (GET_MODE_BITSIZE (mode)
1402 > GET_MODE_BITSIZE (GET_MODE (op)));
1403 if (tmode != BLKmode)
1405 rtx inner =
1406 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1407 if (inner)
1408 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1409 ? SIGN_EXTEND : ZERO_EXTEND,
1410 mode, inner, tmode);
1414 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1415 /* As we do not know which address space the pointer is referring to,
1416 we can do this only if the target does not support different pointer
1417 or address modes depending on the address space. */
1418 if (target_default_pointer_address_modes_p ()
1419 && ! POINTERS_EXTEND_UNSIGNED
1420 && mode == Pmode && GET_MODE (op) == ptr_mode
1421 && (CONSTANT_P (op)
1422 || (GET_CODE (op) == SUBREG
1423 && REG_P (SUBREG_REG (op))
1424 && REG_POINTER (SUBREG_REG (op))
1425 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1426 return convert_memory_address (Pmode, op);
1427 #endif
1428 break;
1430 case ZERO_EXTEND:
1431 /* Check for a zero extension of a subreg of a promoted
1432 variable, where the promotion is zero-extended, and the
1433 target mode is the same as the variable's promotion. */
1434 if (GET_CODE (op) == SUBREG
1435 && SUBREG_PROMOTED_VAR_P (op)
1436 && SUBREG_PROMOTED_UNSIGNED_P (op)
1437 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1439 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1440 if (temp)
1441 return temp;
1444 /* Extending a widening multiplication should be canonicalized to
1445 a wider widening multiplication. */
1446 if (GET_CODE (op) == MULT)
1448 rtx lhs = XEXP (op, 0);
1449 rtx rhs = XEXP (op, 1);
1450 enum rtx_code lcode = GET_CODE (lhs);
1451 enum rtx_code rcode = GET_CODE (rhs);
1453 /* Widening multiplies usually extend both operands, but sometimes
1454 they use a shift to extract a portion of a register. */
1455 if ((lcode == ZERO_EXTEND
1456 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1457 && (rcode == ZERO_EXTEND
1458 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1460 machine_mode lmode = GET_MODE (lhs);
1461 machine_mode rmode = GET_MODE (rhs);
1462 int bits;
1464 if (lcode == LSHIFTRT)
1465 /* Number of bits not shifted off the end. */
1466 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1467 else /* lcode == ZERO_EXTEND */
1468 /* Size of inner mode. */
1469 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1471 if (rcode == LSHIFTRT)
1472 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1473 else /* rcode == ZERO_EXTEND */
1474 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1476 /* We can only widen multiplies if the result is mathematically
1477 equivalent. I.e. if overflow was impossible. */
1478 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1479 return simplify_gen_binary
1480 (MULT, mode,
1481 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1482 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1486 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1487 if (GET_CODE (op) == ZERO_EXTEND)
1488 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1489 GET_MODE (XEXP (op, 0)));
1491 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1492 is (zero_extend:M (subreg:O <X>)) if there is mode with
1493 GET_MODE_PRECISION (N) - I bits. */
1494 if (GET_CODE (op) == LSHIFTRT
1495 && GET_CODE (XEXP (op, 0)) == ASHIFT
1496 && CONST_INT_P (XEXP (op, 1))
1497 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1498 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1500 machine_mode tmode
1501 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1502 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1503 if (tmode != BLKmode)
1505 rtx inner =
1506 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1507 if (inner)
1508 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1512 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1513 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1514 of mode N. E.g.
1515 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1516 (and:SI (reg:SI) (const_int 63)). */
1517 if (GET_CODE (op) == SUBREG
1518 && GET_MODE_PRECISION (GET_MODE (op))
1519 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1520 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1521 <= HOST_BITS_PER_WIDE_INT
1522 && GET_MODE_PRECISION (mode)
1523 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1524 && subreg_lowpart_p (op)
1525 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1526 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1528 if (GET_MODE_PRECISION (mode)
1529 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1530 return SUBREG_REG (op);
1531 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1532 GET_MODE (SUBREG_REG (op)));
1535 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1536 /* As we do not know which address space the pointer is referring to,
1537 we can do this only if the target does not support different pointer
1538 or address modes depending on the address space. */
1539 if (target_default_pointer_address_modes_p ()
1540 && POINTERS_EXTEND_UNSIGNED > 0
1541 && mode == Pmode && GET_MODE (op) == ptr_mode
1542 && (CONSTANT_P (op)
1543 || (GET_CODE (op) == SUBREG
1544 && REG_P (SUBREG_REG (op))
1545 && REG_POINTER (SUBREG_REG (op))
1546 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1547 return convert_memory_address (Pmode, op);
1548 #endif
1549 break;
1551 default:
1552 break;
1555 return 0;
1558 /* Try to compute the value of a unary operation CODE whose output mode is to
1559 be MODE with input operand OP whose mode was originally OP_MODE.
1560 Return zero if the value cannot be computed. */
1561 rtx
1562 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1563 rtx op, machine_mode op_mode)
1565 unsigned int width = GET_MODE_PRECISION (mode);
1567 if (code == VEC_DUPLICATE)
1569 gcc_assert (VECTOR_MODE_P (mode));
1570 if (GET_MODE (op) != VOIDmode)
1572 if (!VECTOR_MODE_P (GET_MODE (op)))
1573 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1574 else
1575 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1576 (GET_MODE (op)));
1578 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1579 || GET_CODE (op) == CONST_VECTOR)
1581 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1582 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1583 rtvec v = rtvec_alloc (n_elts);
1584 unsigned int i;
1586 if (GET_CODE (op) != CONST_VECTOR)
1587 for (i = 0; i < n_elts; i++)
1588 RTVEC_ELT (v, i) = op;
1589 else
1591 machine_mode inmode = GET_MODE (op);
1592 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1593 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1595 gcc_assert (in_n_elts < n_elts);
1596 gcc_assert ((n_elts % in_n_elts) == 0);
1597 for (i = 0; i < n_elts; i++)
1598 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1600 return gen_rtx_CONST_VECTOR (mode, v);
1604 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1606 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1607 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1608 machine_mode opmode = GET_MODE (op);
1609 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1610 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1611 rtvec v = rtvec_alloc (n_elts);
1612 unsigned int i;
1614 gcc_assert (op_n_elts == n_elts);
1615 for (i = 0; i < n_elts; i++)
1617 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1618 CONST_VECTOR_ELT (op, i),
1619 GET_MODE_INNER (opmode));
1620 if (!x)
1621 return 0;
1622 RTVEC_ELT (v, i) = x;
1624 return gen_rtx_CONST_VECTOR (mode, v);
1627 /* The order of these tests is critical so that, for example, we don't
1628 check the wrong mode (input vs. output) for a conversion operation,
1629 such as FIX. At some point, this should be simplified. */
1631 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1633 REAL_VALUE_TYPE d;
1635 if (op_mode == VOIDmode)
1637 /* CONST_INTs have VOIDmode as the mode. We assume that all
1638 the bits of the constant are significant, though this is
1639 a dangerous assumption, as CONST_INTs are often
1640 created and used with garbage in the bits outside of the
1641 precision of the implied mode of the const_int. */
1642 op_mode = MAX_MODE_INT;
1645 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1646 d = real_value_truncate (mode, d);
1647 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1649 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1651 REAL_VALUE_TYPE d;
1653 if (op_mode == VOIDmode)
1655 /* CONST_INTs have VOIDmode as the mode. We assume that all
1656 the bits of the constant are significant, though this is
1657 a dangerous assumption, as CONST_INTs are often
1658 created and used with garbage in the bits outside of the
1659 precision of the implied mode of the const_int. */
1660 op_mode = MAX_MODE_INT;
1663 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1664 d = real_value_truncate (mode, d);
1665 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1668 if (CONST_SCALAR_INT_P (op) && width > 0)
1670 wide_int result;
1671 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1672 rtx_mode_t op0 = std::make_pair (op, imode);
1673 int int_value;
1675 #if TARGET_SUPPORTS_WIDE_INT == 0
1676 /* This assert keeps the simplification from producing a result
1677 that cannot be represented in a CONST_DOUBLE but a lot of
1678 upstream callers expect that this function never fails to
1679 simplify something, and so if you added this to the test
1680 above the code would die later anyway. If this assert
1681 happens, you just need to make the port support wide int. */
1682 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1683 #endif
1685 switch (code)
1687 case NOT:
1688 result = wi::bit_not (op0);
1689 break;
1691 case NEG:
1692 result = wi::neg (op0);
1693 break;
1695 case ABS:
1696 result = wi::abs (op0);
1697 break;
1699 case FFS:
1700 result = wi::shwi (wi::ffs (op0), mode);
1701 break;
1703 case CLZ:
1704 if (wi::ne_p (op0, 0))
1705 int_value = wi::clz (op0);
1706 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1707 int_value = GET_MODE_PRECISION (mode);
1708 result = wi::shwi (int_value, mode);
1709 break;
1711 case CLRSB:
1712 result = wi::shwi (wi::clrsb (op0), mode);
1713 break;
1715 case CTZ:
1716 if (wi::ne_p (op0, 0))
1717 int_value = wi::ctz (op0);
1718 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1719 int_value = GET_MODE_PRECISION (mode);
1720 result = wi::shwi (int_value, mode);
1721 break;
1723 case POPCOUNT:
1724 result = wi::shwi (wi::popcount (op0), mode);
1725 break;
1727 case PARITY:
1728 result = wi::shwi (wi::parity (op0), mode);
1729 break;
1731 case BSWAP:
1732 result = wide_int (op0).bswap ();
1733 break;
1735 case TRUNCATE:
1736 case ZERO_EXTEND:
1737 result = wide_int::from (op0, width, UNSIGNED);
1738 break;
1740 case SIGN_EXTEND:
1741 result = wide_int::from (op0, width, SIGNED);
1742 break;
1744 case SQRT:
1745 default:
1746 return 0;
1749 return immed_wide_int_const (result, mode);
1752 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1753 && SCALAR_FLOAT_MODE_P (mode)
1754 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1756 REAL_VALUE_TYPE d;
1757 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1759 switch (code)
1761 case SQRT:
1762 return 0;
1763 case ABS:
1764 d = real_value_abs (&d);
1765 break;
1766 case NEG:
1767 d = real_value_negate (&d);
1768 break;
1769 case FLOAT_TRUNCATE:
1770 d = real_value_truncate (mode, d);
1771 break;
1772 case FLOAT_EXTEND:
1773 /* All this does is change the mode, unless changing
1774 mode class. */
1775 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1776 real_convert (&d, mode, &d);
1777 break;
1778 case FIX:
1779 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1780 break;
1781 case NOT:
1783 long tmp[4];
1784 int i;
1786 real_to_target (tmp, &d, GET_MODE (op));
1787 for (i = 0; i < 4; i++)
1788 tmp[i] = ~tmp[i];
1789 real_from_target (&d, tmp, mode);
1790 break;
1792 default:
1793 gcc_unreachable ();
1795 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1797 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1798 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1799 && GET_MODE_CLASS (mode) == MODE_INT
1800 && width > 0)
1802 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1803 operators are intentionally left unspecified (to ease implementation
1804 by target backends), for consistency, this routine implements the
1805 same semantics for constant folding as used by the middle-end. */
1807 /* This was formerly used only for non-IEEE float.
1808 eggert@twinsun.com says it is safe for IEEE also. */
1809 REAL_VALUE_TYPE x, t;
1810 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1811 wide_int wmax, wmin;
1812 /* This is part of the ABI of real_to_integer, but we check
1813 things before making this call. */
1814 bool fail;
1816 switch (code)
1818 case FIX:
1819 if (REAL_VALUE_ISNAN (x))
1820 return const0_rtx;
1822 /* Test against the signed upper bound. */
1823 wmax = wi::max_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1825 if (REAL_VALUES_LESS (t, x))
1826 return immed_wide_int_const (wmax, mode);
1828 /* Test against the signed lower bound. */
1829 wmin = wi::min_value (width, SIGNED);
1830 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1831 if (REAL_VALUES_LESS (x, t))
1832 return immed_wide_int_const (wmin, mode);
1834 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1835 break;
1837 case UNSIGNED_FIX:
1838 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1839 return const0_rtx;
1841 /* Test against the unsigned upper bound. */
1842 wmax = wi::max_value (width, UNSIGNED);
1843 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1844 if (REAL_VALUES_LESS (t, x))
1845 return immed_wide_int_const (wmax, mode);
1847 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1848 mode);
1849 break;
1851 default:
1852 gcc_unreachable ();
1856 return NULL_RTX;
1859 /* Subroutine of simplify_binary_operation to simplify a binary operation
1860 CODE that can commute with byte swapping, with result mode MODE and
1861 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1862 Return zero if no simplification or canonicalization is possible. */
1864 static rtx
1865 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1866 rtx op0, rtx op1)
1868 rtx tem;
1870 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped. */
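/* For example (illustrative constants), with SImode and AND:
   (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))). */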
1871 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1873 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1874 simplify_gen_unary (BSWAP, mode, op1, mode));
1875 return simplify_gen_unary (BSWAP, mode, tem, mode);
1878 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1879 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1881 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1882 return simplify_gen_unary (BSWAP, mode, tem, mode);
1885 return NULL_RTX;
1888 /* Subroutine of simplify_binary_operation to simplify a commutative,
1889 associative binary operation CODE with result mode MODE, operating
1890 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1891 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1892 canonicalization is possible. */
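/* For instance, with code == PLUS (illustrative operands):
   (plus (plus x (const_int 4)) y) is canonicalized to
   (plus (plus x y) (const_int 4)), and
   (plus (plus a b) (plus c d)) is linearized to
   (plus (plus (plus a b) c) d). */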
1894 static rtx
1895 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1896 rtx op0, rtx op1)
1898 rtx tem;
1900 /* Linearize the operator to the left. */
1901 if (GET_CODE (op1) == code)
1903 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1904 if (GET_CODE (op0) == code)
1906 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1907 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1910 /* "a op (b op c)" becomes "(b op c) op a". */
1911 if (! swap_commutative_operands_p (op1, op0))
1912 return simplify_gen_binary (code, mode, op1, op0);
1914 std::swap (op0, op1);
1917 if (GET_CODE (op0) == code)
1919 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1920 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1922 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1923 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1926 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1927 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1928 if (tem != 0)
1929 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1931 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1932 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1933 if (tem != 0)
1934 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1937 return 0;
1941 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1942 and OP1. Return 0 if no simplification is possible.
1944 Don't use this for relational operations such as EQ or LT.
1945 Use simplify_relational_operation instead. */
1947 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1948 rtx op0, rtx op1)
1950 rtx trueop0, trueop1;
1951 rtx tem;
1953 /* Relational operations don't work here. We must know the mode
1954 of the operands in order to do the comparison correctly.
1955 Assuming a full word can give incorrect results.
1956 Consider comparing 128 with -128 in QImode. */
1957 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1958 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1960 /* Make sure the constant is second. */
1961 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1962 && swap_commutative_operands_p (op0, op1))
1963 std::swap (op0, op1);
1965 trueop0 = avoid_constant_pool_reference (op0);
1966 trueop1 = avoid_constant_pool_reference (op1);
1968 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1969 if (tem)
1970 return tem;
1971 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1974 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1975 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1976 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1977 actual constants. */
1979 static rtx
1980 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1981 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1983 rtx tem, reversed, opleft, opright;
1984 HOST_WIDE_INT val;
1985 unsigned int width = GET_MODE_PRECISION (mode);
1987 /* Even if we can't compute a constant result,
1988 there are some cases worth simplifying. */
1990 switch (code)
1992 case PLUS:
1993 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1994 when x is NaN, infinite, or finite and nonzero. They aren't
1995 when x is -0 and the rounding mode is not towards -infinity,
1996 since (-0) + 0 is then 0. */
1997 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1998 return op0;
2000 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2001 transformations are safe even for IEEE. */
2002 if (GET_CODE (op0) == NEG)
2003 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2004 else if (GET_CODE (op1) == NEG)
2005 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2007 /* (~a) + 1 -> -a */
2008 if (INTEGRAL_MODE_P (mode)
2009 && GET_CODE (op0) == NOT
2010 && trueop1 == const1_rtx)
2011 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2013 /* Handle both-operands-constant cases. We can only add
2014 CONST_INTs to constants since the sum of relocatable symbols
2015 can't be handled by most assemblers. Don't add CONST_INT
2016 to CONST_INT since overflow won't be computed properly if wider
2017 than HOST_BITS_PER_WIDE_INT. */
2019 if ((GET_CODE (op0) == CONST
2020 || GET_CODE (op0) == SYMBOL_REF
2021 || GET_CODE (op0) == LABEL_REF)
2022 && CONST_INT_P (op1))
2023 return plus_constant (mode, op0, INTVAL (op1));
2024 else if ((GET_CODE (op1) == CONST
2025 || GET_CODE (op1) == SYMBOL_REF
2026 || GET_CODE (op1) == LABEL_REF)
2027 && CONST_INT_P (op0))
2028 return plus_constant (mode, op1, INTVAL (op0));
2030 /* See if this is something like X * C - X or vice versa or
2031 if the multiplication is written as a shift. If so, we can
2032 distribute and make a new multiply, shift, or maybe just
2033 have X (if C is 2 in the example above). But don't make
2034 something more expensive than we had before. */
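/* For example (illustrative operands): (plus (mult x (const_int 3)) x)
   can become (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   can become (mult x (const_int 5)), provided the result is not costlier
   than the original per set_src_cost. */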
2036 if (SCALAR_INT_MODE_P (mode))
2038 rtx lhs = op0, rhs = op1;
2040 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2041 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2043 if (GET_CODE (lhs) == NEG)
2045 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2046 lhs = XEXP (lhs, 0);
2048 else if (GET_CODE (lhs) == MULT
2049 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2051 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2052 lhs = XEXP (lhs, 0);
2054 else if (GET_CODE (lhs) == ASHIFT
2055 && CONST_INT_P (XEXP (lhs, 1))
2056 && INTVAL (XEXP (lhs, 1)) >= 0
2057 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2059 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2060 GET_MODE_PRECISION (mode));
2061 lhs = XEXP (lhs, 0);
2064 if (GET_CODE (rhs) == NEG)
2066 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2067 rhs = XEXP (rhs, 0);
2069 else if (GET_CODE (rhs) == MULT
2070 && CONST_INT_P (XEXP (rhs, 1)))
2072 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2073 rhs = XEXP (rhs, 0);
2075 else if (GET_CODE (rhs) == ASHIFT
2076 && CONST_INT_P (XEXP (rhs, 1))
2077 && INTVAL (XEXP (rhs, 1)) >= 0
2078 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2080 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2081 GET_MODE_PRECISION (mode));
2082 rhs = XEXP (rhs, 0);
2085 if (rtx_equal_p (lhs, rhs))
2087 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2088 rtx coeff;
2089 bool speed = optimize_function_for_speed_p (cfun);
2091 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2093 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2094 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2095 ? tem : 0;
2099 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2100 if (CONST_SCALAR_INT_P (op1)
2101 && GET_CODE (op0) == XOR
2102 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2103 && mode_signbit_p (mode, op1))
2104 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2105 simplify_gen_binary (XOR, mode, op1,
2106 XEXP (op0, 1)));
2108 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2109 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2110 && GET_CODE (op0) == MULT
2111 && GET_CODE (XEXP (op0, 0)) == NEG)
2113 rtx in1, in2;
2115 in1 = XEXP (XEXP (op0, 0), 0);
2116 in2 = XEXP (op0, 1);
2117 return simplify_gen_binary (MINUS, mode, op1,
2118 simplify_gen_binary (MULT, mode,
2119 in1, in2));
2122 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2123 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2124 is 1. */
2125 if (COMPARISON_P (op0)
2126 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2127 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2128 && (reversed = reversed_comparison (op0, mode)))
2129 return
2130 simplify_gen_unary (NEG, mode, reversed, mode);
2132 /* If one of the operands is a PLUS or a MINUS, see if we can
2133 simplify this by the associative law.
2134 Don't use the associative law for floating point.
2135 The inaccuracy makes it nonassociative,
2136 and subtle programs can break if operations are associated. */
2138 if (INTEGRAL_MODE_P (mode)
2139 && (plus_minus_operand_p (op0)
2140 || plus_minus_operand_p (op1))
2141 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2142 return tem;
2144 /* Reassociate floating point addition only when the user
2145 specifies associative math operations. */
2146 if (FLOAT_MODE_P (mode)
2147 && flag_associative_math)
2149 tem = simplify_associative_operation (code, mode, op0, op1);
2150 if (tem)
2151 return tem;
2153 break;
2155 case COMPARE:
2156 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2157 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2158 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2159 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2161 rtx xop00 = XEXP (op0, 0);
2162 rtx xop10 = XEXP (op1, 0);
2164 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2165 return xop00;
2167 if (REG_P (xop00) && REG_P (xop10)
2168 && GET_MODE (xop00) == GET_MODE (xop10)
2169 && REGNO (xop00) == REGNO (xop10)
2170 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2171 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2172 return xop00;
2174 break;
2176 case MINUS:
2177 /* We can't assume x-x is 0 even with non-IEEE floating point,
2178 but since it is zero except in very strange circumstances, we
2179 will treat it as zero with -ffinite-math-only. */
2180 if (rtx_equal_p (trueop0, trueop1)
2181 && ! side_effects_p (op0)
2182 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2183 return CONST0_RTX (mode);
2185 /* Change subtraction from zero into negation. (0 - x) is the
2186 same as -x when x is NaN, infinite, or finite and nonzero.
2187 But if the mode has signed zeros, and does not round towards
2188 -infinity, then 0 - 0 is 0, not -0. */
2189 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2190 return simplify_gen_unary (NEG, mode, op1, mode);
2192 /* (-1 - a) is ~a. */
2193 if (trueop0 == constm1_rtx)
2194 return simplify_gen_unary (NOT, mode, op1, mode);
2196 /* Subtracting 0 has no effect unless the mode has signed zeros
2197 and supports rounding towards -infinity. In such a case,
2198 0 - 0 is -0. */
2199 if (!(HONOR_SIGNED_ZEROS (mode)
2200 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2201 && trueop1 == CONST0_RTX (mode))
2202 return op0;
2204 /* See if this is something like X * C - X or vice versa or
2205 if the multiplication is written as a shift. If so, we can
2206 distribute and make a new multiply, shift, or maybe just
2207 have X (if C is 2 in the example above). But don't make
2208 something more expensive than we had before. */
2210 if (SCALAR_INT_MODE_P (mode))
2212 rtx lhs = op0, rhs = op1;
2214 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2215 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2217 if (GET_CODE (lhs) == NEG)
2219 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2220 lhs = XEXP (lhs, 0);
2222 else if (GET_CODE (lhs) == MULT
2223 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2225 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2226 lhs = XEXP (lhs, 0);
2228 else if (GET_CODE (lhs) == ASHIFT
2229 && CONST_INT_P (XEXP (lhs, 1))
2230 && INTVAL (XEXP (lhs, 1)) >= 0
2231 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2233 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2234 GET_MODE_PRECISION (mode));
2235 lhs = XEXP (lhs, 0);
2238 if (GET_CODE (rhs) == NEG)
2240 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2241 rhs = XEXP (rhs, 0);
2243 else if (GET_CODE (rhs) == MULT
2244 && CONST_INT_P (XEXP (rhs, 1)))
2246 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2247 rhs = XEXP (rhs, 0);
2249 else if (GET_CODE (rhs) == ASHIFT
2250 && CONST_INT_P (XEXP (rhs, 1))
2251 && INTVAL (XEXP (rhs, 1)) >= 0
2252 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2254 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2255 GET_MODE_PRECISION (mode));
2256 negcoeff1 = -negcoeff1;
2257 rhs = XEXP (rhs, 0);
2260 if (rtx_equal_p (lhs, rhs))
2262 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2263 rtx coeff;
2264 bool speed = optimize_function_for_speed_p (cfun);
2266 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2268 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2269 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2270 ? tem : 0;
2274 /* (a - (-b)) -> (a + b). True even for IEEE. */
2275 if (GET_CODE (op1) == NEG)
2276 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2278 /* (-x - c) may be simplified as (-c - x). */
2279 if (GET_CODE (op0) == NEG
2280 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2282 tem = simplify_unary_operation (NEG, mode, op1, mode);
2283 if (tem)
2284 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2287 /* Don't let a relocatable value get a negative coeff. */
2288 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2289 return simplify_gen_binary (PLUS, mode,
2290 op0,
2291 neg_const_int (mode, op1));
2293 /* (x - (x & y)) -> (x & ~y) */
2294 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2296 if (rtx_equal_p (op0, XEXP (op1, 0)))
2298 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2299 GET_MODE (XEXP (op1, 1)));
2300 return simplify_gen_binary (AND, mode, op0, tem);
2302 if (rtx_equal_p (op0, XEXP (op1, 1)))
2304 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2305 GET_MODE (XEXP (op1, 0)));
2306 return simplify_gen_binary (AND, mode, op0, tem);
2310 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2311 by reversing the comparison code if valid. */
2312 if (STORE_FLAG_VALUE == 1
2313 && trueop0 == const1_rtx
2314 && COMPARISON_P (op1)
2315 && (reversed = reversed_comparison (op1, mode)))
2316 return reversed;
2318 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2319 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2320 && GET_CODE (op1) == MULT
2321 && GET_CODE (XEXP (op1, 0)) == NEG)
2323 rtx in1, in2;
2325 in1 = XEXP (XEXP (op1, 0), 0);
2326 in2 = XEXP (op1, 1);
2327 return simplify_gen_binary (PLUS, mode,
2328 simplify_gen_binary (MULT, mode,
2329 in1, in2),
2330 op0);
2333 /* Canonicalize (minus (neg A) (mult B C)) to
2334 (minus (mult (neg B) C) A). */
2335 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2336 && GET_CODE (op1) == MULT
2337 && GET_CODE (op0) == NEG)
2339 rtx in1, in2;
2341 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2342 in2 = XEXP (op1, 1);
2343 return simplify_gen_binary (MINUS, mode,
2344 simplify_gen_binary (MULT, mode,
2345 in1, in2),
2346 XEXP (op0, 0));
2349 /* If one of the operands is a PLUS or a MINUS, see if we can
2350 simplify this by the associative law. This will, for example,
2351 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2352 Don't use the associative law for floating point.
2353 The inaccuracy makes it nonassociative,
2354 and subtle programs can break if operations are associated. */
2356 if (INTEGRAL_MODE_P (mode)
2357 && (plus_minus_operand_p (op0)
2358 || plus_minus_operand_p (op1))
2359 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2360 return tem;
2361 break;
2363 case MULT:
2364 if (trueop1 == constm1_rtx)
2365 return simplify_gen_unary (NEG, mode, op0, mode);
2367 if (GET_CODE (op0) == NEG)
2369 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2370 /* If op1 is a MULT as well and simplify_unary_operation
2371 just moved the NEG to the second operand, simplify_gen_binary
2372 below could, through simplify_associative_operation, move
2373 the NEG around again and recurse endlessly. */
2374 if (temp
2375 && GET_CODE (op1) == MULT
2376 && GET_CODE (temp) == MULT
2377 && XEXP (op1, 0) == XEXP (temp, 0)
2378 && GET_CODE (XEXP (temp, 1)) == NEG
2379 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2380 temp = NULL_RTX;
2381 if (temp)
2382 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2384 if (GET_CODE (op1) == NEG)
2386 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2387 /* If op0 is a MULT as well and simplify_unary_operation
2388 just moved the NEG to the second operand, simplify_gen_binary
2389 below could, through simplify_associative_operation, move
2390 the NEG around again and recurse endlessly. */
2391 if (temp
2392 && GET_CODE (op0) == MULT
2393 && GET_CODE (temp) == MULT
2394 && XEXP (op0, 0) == XEXP (temp, 0)
2395 && GET_CODE (XEXP (temp, 1)) == NEG
2396 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2397 temp = NULL_RTX;
2398 if (temp)
2399 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2402 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2403 x is NaN, since x * 0 is then also NaN. Nor is it valid
2404 when the mode has signed zeros, since multiplying a negative
2405 number by 0 will give -0, not 0. */
2406 if (!HONOR_NANS (mode)
2407 && !HONOR_SIGNED_ZEROS (mode)
2408 && trueop1 == CONST0_RTX (mode)
2409 && ! side_effects_p (op0))
2410 return op1;
2412 /* In IEEE floating point, x*1 is not equivalent to x for
2413 signalling NaNs. */
2414 if (!HONOR_SNANS (mode)
2415 && trueop1 == CONST1_RTX (mode))
2416 return op0;
2418 /* Convert multiply by constant power of two into shift. */
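/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
   since 8 == 1 << 3 (illustrative constant). */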
2419 if (CONST_SCALAR_INT_P (trueop1))
2421 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2422 if (val >= 0)
2423 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2426 /* x*2 is x+x and x*(-1) is -x */
2427 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2428 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2429 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2430 && GET_MODE (op0) == mode)
2432 REAL_VALUE_TYPE d;
2433 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2435 if (REAL_VALUES_EQUAL (d, dconst2))
2436 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2438 if (!HONOR_SNANS (mode)
2439 && REAL_VALUES_EQUAL (d, dconstm1))
2440 return simplify_gen_unary (NEG, mode, op0, mode);
2443 /* Optimize -x * -x as x * x. */
2444 if (FLOAT_MODE_P (mode)
2445 && GET_CODE (op0) == NEG
2446 && GET_CODE (op1) == NEG
2447 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2448 && !side_effects_p (XEXP (op0, 0)))
2449 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2451 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2452 if (SCALAR_FLOAT_MODE_P (mode)
2453 && GET_CODE (op0) == ABS
2454 && GET_CODE (op1) == ABS
2455 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2456 && !side_effects_p (XEXP (op0, 0)))
2457 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2459 /* Reassociate multiplication, but for floating point MULTs
2460 only when the user specifies unsafe math optimizations. */
2461 if (! FLOAT_MODE_P (mode)
2462 || flag_unsafe_math_optimizations)
2464 tem = simplify_associative_operation (code, mode, op0, op1);
2465 if (tem)
2466 return tem;
2468 break;
2470 case IOR:
2471 if (trueop1 == CONST0_RTX (mode))
2472 return op0;
2473 if (INTEGRAL_MODE_P (mode)
2474 && trueop1 == CONSTM1_RTX (mode)
2475 && !side_effects_p (op0))
2476 return op1;
2477 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2478 return op0;
2479 /* A | (~A) -> -1 */
2480 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2481 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2482 && ! side_effects_p (op0)
2483 && SCALAR_INT_MODE_P (mode))
2484 return constm1_rtx;
2486 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2487 if (CONST_INT_P (op1)
2488 && HWI_COMPUTABLE_MODE_P (mode)
2489 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2490 && !side_effects_p (op0))
2491 return op1;
2493 /* Canonicalize (X & C1) | C2. */
2494 if (GET_CODE (op0) == AND
2495 && CONST_INT_P (trueop1)
2496 && CONST_INT_P (XEXP (op0, 1)))
2498 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2499 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2500 HOST_WIDE_INT c2 = INTVAL (trueop1);
2502 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2503 if ((c1 & c2) == c1
2504 && !side_effects_p (XEXP (op0, 0)))
2505 return trueop1;
2507 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2508 if (((c1|c2) & mask) == mask)
2509 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2511 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2512 if (((c1 & ~c2) & mask) != (c1 & mask))
2514 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2515 gen_int_mode (c1 & ~c2, mode));
2516 return simplify_gen_binary (IOR, mode, tem, op1);
2520 /* Convert (A & B) | A to A. */
2521 if (GET_CODE (op0) == AND
2522 && (rtx_equal_p (XEXP (op0, 0), op1)
2523 || rtx_equal_p (XEXP (op0, 1), op1))
2524 && ! side_effects_p (XEXP (op0, 0))
2525 && ! side_effects_p (XEXP (op0, 1)))
2526 return op1;
2528 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2529 mode size to (rotate A CX). */
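/* For example, in 32-bit SImode (illustrative constants):
   (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   becomes (rotate x (const_int 24)), since 24 + 8 == 32. */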
2531 if (GET_CODE (op1) == ASHIFT
2532 || GET_CODE (op1) == SUBREG)
2534 opleft = op1;
2535 opright = op0;
2537 else
2539 opright = op1;
2540 opleft = op0;
2543 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2544 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2545 && CONST_INT_P (XEXP (opleft, 1))
2546 && CONST_INT_P (XEXP (opright, 1))
2547 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2548 == GET_MODE_PRECISION (mode)))
2549 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2551 /* Same, but for ashift that has been "simplified" to a wider mode
2552 by simplify_shift_const. */
2554 if (GET_CODE (opleft) == SUBREG
2555 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2556 && GET_CODE (opright) == LSHIFTRT
2557 && GET_CODE (XEXP (opright, 0)) == SUBREG
2558 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2559 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2560 && (GET_MODE_SIZE (GET_MODE (opleft))
2561 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2562 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2563 SUBREG_REG (XEXP (opright, 0)))
2564 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2565 && CONST_INT_P (XEXP (opright, 1))
2566 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2567 == GET_MODE_PRECISION (mode)))
2568 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2569 XEXP (SUBREG_REG (opleft), 1));
2571 /* If we have (ior (and X C1) C2), simplify this by making
2572 C1 as small as possible if C1 actually changes. */
2573 if (CONST_INT_P (op1)
2574 && (HWI_COMPUTABLE_MODE_P (mode)
2575 || INTVAL (op1) > 0)
2576 && GET_CODE (op0) == AND
2577 && CONST_INT_P (XEXP (op0, 1))
2578 && CONST_INT_P (op1)
2579 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2581 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2582 gen_int_mode (UINTVAL (XEXP (op0, 1))
2583 & ~UINTVAL (op1),
2584 mode));
2585 return simplify_gen_binary (IOR, mode, tmp, op1);
2588 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2589 a (sign_extend (plus ...)). If so, check that OP1 is a CONST_INT
2590 and that the PLUS does not affect any of the bits in OP1; then we can do
2591 the IOR as a PLUS and we can associate. This is valid if OP1
2592 can be safely shifted left C bits. */
2593 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2594 && GET_CODE (XEXP (op0, 0)) == PLUS
2595 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2596 && CONST_INT_P (XEXP (op0, 1))
2597 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2599 int count = INTVAL (XEXP (op0, 1));
2600 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2602 if (mask >> count == INTVAL (trueop1)
2603 && trunc_int_for_mode (mask, mode) == mask
2604 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2605 return simplify_gen_binary (ASHIFTRT, mode,
2606 plus_constant (mode, XEXP (op0, 0),
2607 mask),
2608 XEXP (op0, 1));
2611 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2612 if (tem)
2613 return tem;
2615 tem = simplify_associative_operation (code, mode, op0, op1);
2616 if (tem)
2617 return tem;
2618 break;
2620 case XOR:
2621 if (trueop1 == CONST0_RTX (mode))
2622 return op0;
2623 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2624 return simplify_gen_unary (NOT, mode, op0, mode);
2625 if (rtx_equal_p (trueop0, trueop1)
2626 && ! side_effects_p (op0)
2627 && GET_MODE_CLASS (mode) != MODE_CC)
2628 return CONST0_RTX (mode);
2630 /* Canonicalize XOR of the most significant bit to PLUS. */
2631 if (CONST_SCALAR_INT_P (op1)
2632 && mode_signbit_p (mode, op1))
2633 return simplify_gen_binary (PLUS, mode, op0, op1);
2634 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2635 if (CONST_SCALAR_INT_P (op1)
2636 && GET_CODE (op0) == PLUS
2637 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2638 && mode_signbit_p (mode, XEXP (op0, 1)))
2639 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2640 simplify_gen_binary (XOR, mode, op1,
2641 XEXP (op0, 1)));
2643 /* If we are XORing two things that have no bits in common,
2644 convert them into an IOR. This helps to detect rotation encoded
2645 using those methods and possibly other simplifications. */
2647 if (HWI_COMPUTABLE_MODE_P (mode)
2648 && (nonzero_bits (op0, mode)
2649 & nonzero_bits (op1, mode)) == 0)
2650 return (simplify_gen_binary (IOR, mode, op0, op1));
2652 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2653 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2654 (NOT y). */
2656 int num_negated = 0;
2658 if (GET_CODE (op0) == NOT)
2659 num_negated++, op0 = XEXP (op0, 0);
2660 if (GET_CODE (op1) == NOT)
2661 num_negated++, op1 = XEXP (op1, 0);
2663 if (num_negated == 2)
2664 return simplify_gen_binary (XOR, mode, op0, op1);
2665 else if (num_negated == 1)
2666 return simplify_gen_unary (NOT, mode,
2667 simplify_gen_binary (XOR, mode, op0, op1),
2668 mode);
2671 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2672 correspond to a machine insn or result in further simplifications
2673 if B is a constant. */
2675 if (GET_CODE (op0) == AND
2676 && rtx_equal_p (XEXP (op0, 1), op1)
2677 && ! side_effects_p (op1))
2678 return simplify_gen_binary (AND, mode,
2679 simplify_gen_unary (NOT, mode,
2680 XEXP (op0, 0), mode),
2681 op1);
2683 else if (GET_CODE (op0) == AND
2684 && rtx_equal_p (XEXP (op0, 0), op1)
2685 && ! side_effects_p (op1))
2686 return simplify_gen_binary (AND, mode,
2687 simplify_gen_unary (NOT, mode,
2688 XEXP (op0, 1), mode),
2689 op1);
2691 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2692 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2693 out bits inverted twice and not set by C. Similarly, given
2694 (xor (and (xor A B) C) D), simplify without inverting C in
2695 the xor operand: (xor (and A C) (B&C)^D). */
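/* Worked example with illustrative constants B = 0x0f, C = 0xf0,
   D = 0x33: the IOR form becomes
   (xor (ior A (const_int 0xf0)) (const_int 0x3c)),
   since (0x0f & ~0xf0) ^ 0x33 == 0x3c. */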
2697 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2698 && GET_CODE (XEXP (op0, 0)) == XOR
2699 && CONST_INT_P (op1)
2700 && CONST_INT_P (XEXP (op0, 1))
2701 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2703 enum rtx_code op = GET_CODE (op0);
2704 rtx a = XEXP (XEXP (op0, 0), 0);
2705 rtx b = XEXP (XEXP (op0, 0), 1);
2706 rtx c = XEXP (op0, 1);
2707 rtx d = op1;
2708 HOST_WIDE_INT bval = INTVAL (b);
2709 HOST_WIDE_INT cval = INTVAL (c);
2710 HOST_WIDE_INT dval = INTVAL (d);
2711 HOST_WIDE_INT xcval;
2713 if (op == IOR)
2714 xcval = ~cval;
2715 else
2716 xcval = cval;
2718 return simplify_gen_binary (XOR, mode,
2719 simplify_gen_binary (op, mode, a, c),
2720 gen_int_mode ((bval & xcval) ^ dval,
2721 mode));
2724 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2725 we can transform like this:
2726 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2727 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2728 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2729 Attempt a few simplifications when B and C are both constants. */
2730 if (GET_CODE (op0) == AND
2731 && CONST_INT_P (op1)
2732 && CONST_INT_P (XEXP (op0, 1)))
2734 rtx a = XEXP (op0, 0);
2735 rtx b = XEXP (op0, 1);
2736 rtx c = op1;
2737 HOST_WIDE_INT bval = INTVAL (b);
2738 HOST_WIDE_INT cval = INTVAL (c);
2740 /* Instead of computing ~A&C, we compute its negated value,
2741 A|~C. If that yields -1, ~A&C is zero, so we can
2742 optimize for sure. If it does not simplify, we still try
2743 to compute ~A&C below, but since that always allocates
2744 RTL, we don't try that before committing to returning a
2745 simplified expression. */
2746 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2747 GEN_INT (~cval));
2749 if ((~cval & bval) == 0)
2751 rtx na_c = NULL_RTX;
2752 if (n_na_c)
2753 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2754 else
2756 /* If ~A does not simplify, don't bother: we don't
2757 want to simplify 2 operations into 3, and if na_c
2758 were to simplify with na, n_na_c would have
2759 simplified as well. */
2760 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2761 if (na)
2762 na_c = simplify_gen_binary (AND, mode, na, c);
2765 /* Try to simplify ~A&C | ~B&C. */
2766 if (na_c != NULL_RTX)
2767 return simplify_gen_binary (IOR, mode, na_c,
2768 gen_int_mode (~bval & cval, mode));
2770 else
2772 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2773 if (n_na_c == CONSTM1_RTX (mode))
2775 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2776 gen_int_mode (~cval & bval,
2777 mode));
2778 return simplify_gen_binary (IOR, mode, a_nc_b,
2779 gen_int_mode (~bval & cval,
2780 mode));
2785 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2786 comparison if STORE_FLAG_VALUE is 1. */
2787 if (STORE_FLAG_VALUE == 1
2788 && trueop1 == const1_rtx
2789 && COMPARISON_P (op0)
2790 && (reversed = reversed_comparison (op0, mode)))
2791 return reversed;
2793 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2794 is (lt foo (const_int 0)), so we can perform the above
2795 simplification if STORE_FLAG_VALUE is 1. */
2797 if (STORE_FLAG_VALUE == 1
2798 && trueop1 == const1_rtx
2799 && GET_CODE (op0) == LSHIFTRT
2800 && CONST_INT_P (XEXP (op0, 1))
2801 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2802 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2804 /* (xor (comparison foo bar) (const_int sign-bit))
2805 when STORE_FLAG_VALUE is the sign bit. */
2806 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2807 && trueop1 == const_true_rtx
2808 && COMPARISON_P (op0)
2809 && (reversed = reversed_comparison (op0, mode)))
2810 return reversed;
2812 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2813 if (tem)
2814 return tem;
2816 tem = simplify_associative_operation (code, mode, op0, op1);
2817 if (tem)
2818 return tem;
2819 break;
2821 case AND:
2822 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2823 return trueop1;
2824 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2825 return op0;
2826 if (HWI_COMPUTABLE_MODE_P (mode))
2828 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2829 HOST_WIDE_INT nzop1;
2830 if (CONST_INT_P (trueop1))
2832 HOST_WIDE_INT val1 = INTVAL (trueop1);
2833 /* If we are turning off bits already known off in OP0, we need
2834 not do an AND. */
2835 if ((nzop0 & ~val1) == 0)
2836 return op0;
2838 nzop1 = nonzero_bits (trueop1, mode);
2839 /* If we are clearing all the nonzero bits, the result is zero. */
2840 if ((nzop1 & nzop0) == 0
2841 && !side_effects_p (op0) && !side_effects_p (op1))
2842 return CONST0_RTX (mode);
2844 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2845 && GET_MODE_CLASS (mode) != MODE_CC)
2846 return op0;
2847 /* A & (~A) -> 0 */
2848 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2849 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2850 && ! side_effects_p (op0)
2851 && GET_MODE_CLASS (mode) != MODE_CC)
2852 return CONST0_RTX (mode);
2854 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2855 there are no nonzero bits of C outside of X's mode. */
2856 if ((GET_CODE (op0) == SIGN_EXTEND
2857 || GET_CODE (op0) == ZERO_EXTEND)
2858 && CONST_INT_P (trueop1)
2859 && HWI_COMPUTABLE_MODE_P (mode)
2860 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2861 & UINTVAL (trueop1)) == 0)
2863 machine_mode imode = GET_MODE (XEXP (op0, 0));
2864 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2865 gen_int_mode (INTVAL (trueop1),
2866 imode));
2867 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2870 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2871 we might be able to further simplify the AND with X and potentially
2872 remove the truncation altogether. */
2873 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2875 rtx x = XEXP (op0, 0);
2876 machine_mode xmode = GET_MODE (x);
2877 tem = simplify_gen_binary (AND, xmode, x,
2878 gen_int_mode (INTVAL (trueop1), xmode));
2879 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2882 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
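/* E.g. (and (ior x (const_int 0x0f)) (const_int 0x3c)) becomes
   (ior (and x (const_int 0x3c)) (const_int 0x0c)),
   since 0x0f & 0x3c == 0x0c (illustrative constants). */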
2883 if (GET_CODE (op0) == IOR
2884 && CONST_INT_P (trueop1)
2885 && CONST_INT_P (XEXP (op0, 1)))
2887 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2888 return simplify_gen_binary (IOR, mode,
2889 simplify_gen_binary (AND, mode,
2890 XEXP (op0, 0), op1),
2891 gen_int_mode (tmp, mode));
2894 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2895 insn (and may simplify more). */
2896 if (GET_CODE (op0) == XOR
2897 && rtx_equal_p (XEXP (op0, 0), op1)
2898 && ! side_effects_p (op1))
2899 return simplify_gen_binary (AND, mode,
2900 simplify_gen_unary (NOT, mode,
2901 XEXP (op0, 1), mode),
2902 op1);
2904 if (GET_CODE (op0) == XOR
2905 && rtx_equal_p (XEXP (op0, 1), op1)
2906 && ! side_effects_p (op1))
2907 return simplify_gen_binary (AND, mode,
2908 simplify_gen_unary (NOT, mode,
2909 XEXP (op0, 0), mode),
2910 op1);
2912 /* Similarly for (~(A ^ B)) & A. */
2913 if (GET_CODE (op0) == NOT
2914 && GET_CODE (XEXP (op0, 0)) == XOR
2915 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2916 && ! side_effects_p (op1))
2917 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2919 if (GET_CODE (op0) == NOT
2920 && GET_CODE (XEXP (op0, 0)) == XOR
2921 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2922 && ! side_effects_p (op1))
2923 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2925 /* Convert (A | B) & A to A. */
2926 if (GET_CODE (op0) == IOR
2927 && (rtx_equal_p (XEXP (op0, 0), op1)
2928 || rtx_equal_p (XEXP (op0, 1), op1))
2929 && ! side_effects_p (XEXP (op0, 0))
2930 && ! side_effects_p (XEXP (op0, 1)))
2931 return op1;
2933 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2934 ((A & N) + B) & M -> (A + B) & M
2935 Similarly if (N & M) == 0,
2936 ((A | N) + B) & M -> (A + B) & M
2937 and for - instead of + and/or ^ instead of |.
2938 Also, if (N & M) == 0, then
2939 (A +- N) & M -> A & M. */
2940 if (CONST_INT_P (trueop1)
2941 && HWI_COMPUTABLE_MODE_P (mode)
2942 && ~UINTVAL (trueop1)
2943 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2944 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2946 rtx pmop[2];
2947 int which;
2949 pmop[0] = XEXP (op0, 0);
2950 pmop[1] = XEXP (op0, 1);
2952 if (CONST_INT_P (pmop[1])
2953 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2954 return simplify_gen_binary (AND, mode, pmop[0], op1);
2956 for (which = 0; which < 2; which++)
2958 tem = pmop[which];
2959 switch (GET_CODE (tem))
2961 case AND:
2962 if (CONST_INT_P (XEXP (tem, 1))
2963 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2964 == UINTVAL (trueop1))
2965 pmop[which] = XEXP (tem, 0);
2966 break;
2967 case IOR:
2968 case XOR:
2969 if (CONST_INT_P (XEXP (tem, 1))
2970 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2971 pmop[which] = XEXP (tem, 0);
2972 break;
2973 default:
2974 break;
2978 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2980 tem = simplify_gen_binary (GET_CODE (op0), mode,
2981 pmop[0], pmop[1]);
2982 return simplify_gen_binary (code, mode, tem, op1);
2986 /* (and X (ior (not X) Y)) -> (and X Y) */
2987 if (GET_CODE (op1) == IOR
2988 && GET_CODE (XEXP (op1, 0)) == NOT
2989 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2990 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2992 /* (and (ior (not X) Y) X) -> (and X Y) */
2993 if (GET_CODE (op0) == IOR
2994 && GET_CODE (XEXP (op0, 0)) == NOT
2995 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
2996 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2998 /* (and X (ior Y (not X))) -> (and X Y) */
2999 if (GET_CODE (op1) == IOR
3000 && GET_CODE (XEXP (op1, 1)) == NOT
3001 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3002 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3004 /* (and (ior Y (not X)) X) -> (and X Y) */
3005 if (GET_CODE (op0) == IOR
3006 && GET_CODE (XEXP (op0, 1)) == NOT
3007 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3008 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3010 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3011 if (tem)
3012 return tem;
3014 tem = simplify_associative_operation (code, mode, op0, op1);
3015 if (tem)
3016 return tem;
3017 break;
3019 case UDIV:
3020 /* 0/x is 0 (or x&0 if x has side-effects). */
3021 if (trueop0 == CONST0_RTX (mode))
3023 if (side_effects_p (op1))
3024 return simplify_gen_binary (AND, mode, op1, trueop0);
3025 return trueop0;
3027 /* x/1 is x. */
3028 if (trueop1 == CONST1_RTX (mode))
3030 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3031 if (tem)
3032 return tem;
3034 /* Convert divide by power of two into shift. */
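/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4))
   (illustrative constant). */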
3035 if (CONST_INT_P (trueop1)
3036 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3037 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3038 break;
3040 case DIV:
3041 /* Handle floating point and integers separately. */
3042 if (SCALAR_FLOAT_MODE_P (mode))
3044 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3045 safe for modes with NaNs, since 0.0 / 0.0 will then be
3046 NaN rather than 0.0. Nor is it safe for modes with signed
3047 zeros, since dividing 0 by a negative number gives -0.0 */
3048 if (trueop0 == CONST0_RTX (mode)
3049 && !HONOR_NANS (mode)
3050 && !HONOR_SIGNED_ZEROS (mode)
3051 && ! side_effects_p (op1))
3052 return op0;
3053 /* x/1.0 is x. */
3054 if (trueop1 == CONST1_RTX (mode)
3055 && !HONOR_SNANS (mode))
3056 return op0;
3058 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3059 && trueop1 != CONST0_RTX (mode))
3061 REAL_VALUE_TYPE d;
3062 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3064 /* x/-1.0 is -x. */
3065 if (REAL_VALUES_EQUAL (d, dconstm1)
3066 && !HONOR_SNANS (mode))
3067 return simplify_gen_unary (NEG, mode, op0, mode);
3069 /* Change FP division by a constant into multiplication.
3070 Only do this with -freciprocal-math. */
3071 if (flag_reciprocal_math
3072 && !REAL_VALUES_EQUAL (d, dconst0))
3074 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3075 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3076 return simplify_gen_binary (MULT, mode, op0, tem);
3080 else if (SCALAR_INT_MODE_P (mode))
3082 /* 0/x is 0 (or x&0 if x has side-effects). */
3083 if (trueop0 == CONST0_RTX (mode)
3084 && !cfun->can_throw_non_call_exceptions)
3086 if (side_effects_p (op1))
3087 return simplify_gen_binary (AND, mode, op1, trueop0);
3088 return trueop0;
3090 /* x/1 is x. */
3091 if (trueop1 == CONST1_RTX (mode))
3093 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3094 if (tem)
3095 return tem;
3097 /* x/-1 is -x. */
3098 if (trueop1 == constm1_rtx)
3100 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3101 if (x)
3102 return simplify_gen_unary (NEG, mode, x, mode);
3105 break;
3107 case UMOD:
3108 /* 0%x is 0 (or x&0 if x has side-effects). */
3109 if (trueop0 == CONST0_RTX (mode))
3111 if (side_effects_p (op1))
3112 return simplify_gen_binary (AND, mode, op1, trueop0);
3113 return trueop0;
3115 /* x%1 is 0 (or x&0 if x has side-effects). */
3116 if (trueop1 == CONST1_RTX (mode))
3118 if (side_effects_p (op0))
3119 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3120 return CONST0_RTX (mode);
3122 /* Implement modulus by power of two as AND. */
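/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15))
   (illustrative constant). */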
3123 if (CONST_INT_P (trueop1)
3124 && exact_log2 (UINTVAL (trueop1)) > 0)
3125 return simplify_gen_binary (AND, mode, op0,
3126 gen_int_mode (INTVAL (op1) - 1, mode));
3127 break;
3129 case MOD:
3130 /* 0%x is 0 (or x&0 if x has side-effects). */
3131 if (trueop0 == CONST0_RTX (mode))
3133 if (side_effects_p (op1))
3134 return simplify_gen_binary (AND, mode, op1, trueop0);
3135 return trueop0;
3137 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3138 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3140 if (side_effects_p (op0))
3141 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3142 return CONST0_RTX (mode);
3144 break;
3146 case ROTATERT:
3147 case ROTATE:
3148 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3149 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3150 bitsize - 1, use the other rotate direction with an amount of
3151 1 .. bitsize / 2 - 1 instead. */
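/* E.g. in 32-bit SImode, a rotate left by 25 is rewritten as a
   rotate right by 7, since 32 - 25 == 7 (illustrative constants). */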
3152 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3153 if (CONST_INT_P (trueop1)
3154 && IN_RANGE (INTVAL (trueop1),
3155 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3156 GET_MODE_PRECISION (mode) - 1))
3157 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3158 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3159 - INTVAL (trueop1)));
3160 #endif
3161 /* FALLTHRU */
3162 case ASHIFTRT:
3163 if (trueop1 == CONST0_RTX (mode))
3164 return op0;
3165 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3166 return op0;
3167 /* Rotating ~0 always results in ~0. */
3168 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3169 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3170 && ! side_effects_p (op1))
3171 return op0;
3172 /* Given:
3173 scalar modes M1, M2
3174 scalar constants c1, c2
3175 size (M2) > size (M1)
3176 c1 == size (M2) - size (M1)
3177 optimize:
3178 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3179 <low_part>)
3180 (const_int <c2>))
3182 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3183 <low_part>). */
3184 if (code == ASHIFTRT
3185 && !VECTOR_MODE_P (mode)
3186 && SUBREG_P (op0)
3187 && CONST_INT_P (op1)
3188 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3189 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3190 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3191 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3192 > GET_MODE_BITSIZE (mode))
3193 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3194 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3195 - GET_MODE_BITSIZE (mode)))
3196 && subreg_lowpart_p (op0))
3198 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3199 + INTVAL (op1));
3200 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3201 tmp = simplify_gen_binary (ASHIFTRT,
3202 GET_MODE (SUBREG_REG (op0)),
3203 XEXP (SUBREG_REG (op0), 0),
3204 tmp);
3205 return simplify_gen_subreg (mode, tmp, inner_mode,
3206 subreg_lowpart_offset (mode,
3207 inner_mode));
3209 canonicalize_shift:
3210 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3212 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3213 if (val != INTVAL (op1))
3214 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3216 break;
3218 case ASHIFT:
3219 case SS_ASHIFT:
3220 case US_ASHIFT:
3221 if (trueop1 == CONST0_RTX (mode))
3222 return op0;
3223 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3224 return op0;
3225 goto canonicalize_shift;
3227 case LSHIFTRT:
3228 if (trueop1 == CONST0_RTX (mode))
3229 return op0;
3230 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3231 return op0;
3232 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
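/* Reasoning, with illustrative SImode values: when
   CLZ_DEFINED_VALUE_AT_ZERO yields 32, (clz x) is 32 only for x == 0,
   so (lshiftrt (clz x) (const_int 5)) is 1 exactly when x == 0 and 0
   otherwise, which is (eq x (const_int 0)) when STORE_FLAG_VALUE == 1. */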
3233 if (GET_CODE (op0) == CLZ
3234 && CONST_INT_P (trueop1)
3235 && STORE_FLAG_VALUE == 1
3236 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3238 machine_mode imode = GET_MODE (XEXP (op0, 0));
3239 unsigned HOST_WIDE_INT zero_val = 0;
3241 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3242 && zero_val == GET_MODE_PRECISION (imode)
3243 && INTVAL (trueop1) == exact_log2 (zero_val))
3244 return simplify_gen_relational (EQ, mode, imode,
3245 XEXP (op0, 0), const0_rtx);
3247 goto canonicalize_shift;
3249 case SMIN:
3250 if (width <= HOST_BITS_PER_WIDE_INT
3251 && mode_signbit_p (mode, trueop1)
3252 && ! side_effects_p (op0))
3253 return op1;
3254 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3255 return op0;
3256 tem = simplify_associative_operation (code, mode, op0, op1);
3257 if (tem)
3258 return tem;
3259 break;
3261 case SMAX:
3262 if (width <= HOST_BITS_PER_WIDE_INT
3263 && CONST_INT_P (trueop1)
3264 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3265 && ! side_effects_p (op0))
3266 return op1;
3267 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3268 return op0;
3269 tem = simplify_associative_operation (code, mode, op0, op1);
3270 if (tem)
3271 return tem;
3272 break;
3274 case UMIN:
3275 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3276 return op1;
3277 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3278 return op0;
3279 tem = simplify_associative_operation (code, mode, op0, op1);
3280 if (tem)
3281 return tem;
3282 break;
3284 case UMAX:
3285 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3286 return op1;
3287 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3288 return op0;
3289 tem = simplify_associative_operation (code, mode, op0, op1);
3290 if (tem)
3291 return tem;
3292 break;
3294 case SS_PLUS:
3295 case US_PLUS:
3296 case SS_MINUS:
3297 case US_MINUS:
3298 case SS_MULT:
3299 case US_MULT:
3300 case SS_DIV:
3301 case US_DIV:
3302 /* ??? There are simplifications that can be done. */
3303 return 0;
3305 case VEC_SELECT:
3306 if (!VECTOR_MODE_P (mode))
3308 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3309 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3310 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3311 gcc_assert (XVECLEN (trueop1, 0) == 1);
3312 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3314 if (GET_CODE (trueop0) == CONST_VECTOR)
3315 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3316 (trueop1, 0, 0)));
3318 /* Extract a scalar element from a nested VEC_SELECT expression
3319 (with an optional nested VEC_CONCAT expression). Some targets
3320 (i386) extract a scalar element from a vector using a chain of
3321 nested VEC_SELECT expressions. When the input operand is a
3322 memory operand, this operation can be simplified to a simple
3323 scalar load from an offset memory address. */
3324 if (GET_CODE (trueop0) == VEC_SELECT)
3326 rtx op0 = XEXP (trueop0, 0);
3327 rtx op1 = XEXP (trueop0, 1);
3329 machine_mode opmode = GET_MODE (op0);
3330 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3331 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3333 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3334 int elem;
3336 rtvec vec;
3337 rtx tmp_op, tmp;
3339 gcc_assert (GET_CODE (op1) == PARALLEL);
3340 gcc_assert (i < n_elts);
3342 /* Select the element pointed to by the nested selector. */
3343 elem = INTVAL (XVECEXP (op1, 0, i));
3345 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3346 if (GET_CODE (op0) == VEC_CONCAT)
3348 rtx op00 = XEXP (op0, 0);
3349 rtx op01 = XEXP (op0, 1);
3351 machine_mode mode00, mode01;
3352 int n_elts00, n_elts01;
3354 mode00 = GET_MODE (op00);
3355 mode01 = GET_MODE (op01);
3357 /* Find out number of elements of each operand. */
3358 if (VECTOR_MODE_P (mode00))
3360 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3361 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3363 else
3364 n_elts00 = 1;
3366 if (VECTOR_MODE_P (mode01))
3368 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3369 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3371 else
3372 n_elts01 = 1;
3374 gcc_assert (n_elts == n_elts00 + n_elts01);
3376 /* Select the correct operand of the VEC_CONCAT
3377 and adjust the selector. */
3378 if (elem < n_elts01)
3379 tmp_op = op00;
3380 else
3382 tmp_op = op01;
3383 elem -= n_elts00;
3386 else
3387 tmp_op = op0;
3389 vec = rtvec_alloc (1);
3390 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3392 tmp = gen_rtx_fmt_ee (code, mode,
3393 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3394 return tmp;
3396 if (GET_CODE (trueop0) == VEC_DUPLICATE
3397 && GET_MODE (XEXP (trueop0, 0)) == mode)
3398 return XEXP (trueop0, 0);
3400 else
3402 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3403 gcc_assert (GET_MODE_INNER (mode)
3404 == GET_MODE_INNER (GET_MODE (trueop0)));
3405 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3407 if (GET_CODE (trueop0) == CONST_VECTOR)
3409 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3410 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3411 rtvec v = rtvec_alloc (n_elts);
3412 unsigned int i;
3414 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3415 for (i = 0; i < n_elts; i++)
3417 rtx x = XVECEXP (trueop1, 0, i);
3419 gcc_assert (CONST_INT_P (x));
3420 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3421 INTVAL (x));
3424 return gen_rtx_CONST_VECTOR (mode, v);
3427 /* Recognize the identity. */
3428 if (GET_MODE (trueop0) == mode)
3430 bool maybe_ident = true;
3431 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3433 rtx j = XVECEXP (trueop1, 0, i);
3434 if (!CONST_INT_P (j) || INTVAL (j) != i)
3436 maybe_ident = false;
3437 break;
3440 if (maybe_ident)
3441 return trueop0;
3444 /* If we build {a,b} then permute it, build the result directly. */
3445 if (XVECLEN (trueop1, 0) == 2
3446 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3447 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3448 && GET_CODE (trueop0) == VEC_CONCAT
3449 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3450 && GET_MODE (XEXP (trueop0, 0)) == mode
3451 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3452 && GET_MODE (XEXP (trueop0, 1)) == mode)
3454 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3455 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3456 rtx subop0, subop1;
3458 gcc_assert (i0 < 4 && i1 < 4);
3459 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3460 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3462 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3465 if (XVECLEN (trueop1, 0) == 2
3466 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3467 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3468 && GET_CODE (trueop0) == VEC_CONCAT
3469 && GET_MODE (trueop0) == mode)
3471 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3472 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3473 rtx subop0, subop1;
3475 gcc_assert (i0 < 2 && i1 < 2);
3476 subop0 = XEXP (trueop0, i0);
3477 subop1 = XEXP (trueop0, i1);
3479 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3482 /* If we select one half of a vec_concat, return that. */
3483 if (GET_CODE (trueop0) == VEC_CONCAT
3484 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3486 rtx subop0 = XEXP (trueop0, 0);
3487 rtx subop1 = XEXP (trueop0, 1);
3488 machine_mode mode0 = GET_MODE (subop0);
3489 machine_mode mode1 = GET_MODE (subop1);
3490 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3491 int l0 = GET_MODE_SIZE (mode0) / li;
3492 int l1 = GET_MODE_SIZE (mode1) / li;
3493 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3494 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3496 bool success = true;
3497 for (int i = 1; i < l0; ++i)
3499 rtx j = XVECEXP (trueop1, 0, i);
3500 if (!CONST_INT_P (j) || INTVAL (j) != i)
3502 success = false;
3503 break;
3506 if (success)
3507 return subop0;
3509 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3511 bool success = true;
3512 for (int i = 1; i < l1; ++i)
3514 rtx j = XVECEXP (trueop1, 0, i);
3515 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3517 success = false;
3518 break;
3521 if (success)
3522 return subop1;
3527 if (XVECLEN (trueop1, 0) == 1
3528 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3529 && GET_CODE (trueop0) == VEC_CONCAT)
3531 rtx vec = trueop0;
3532 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3534 /* Try to find the element in the VEC_CONCAT. */
3535 while (GET_MODE (vec) != mode
3536 && GET_CODE (vec) == VEC_CONCAT)
3538 HOST_WIDE_INT vec_size;
3540 if (CONST_INT_P (XEXP (vec, 0)))
3542 /* vec_concat of two const_ints doesn't make sense with
3543 respect to modes. */
3544 if (CONST_INT_P (XEXP (vec, 1)))
3545 return 0;
3547 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3548 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3550 else
3551 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3553 if (offset < vec_size)
3554 vec = XEXP (vec, 0);
3555 else
3557 offset -= vec_size;
3558 vec = XEXP (vec, 1);
3560 vec = avoid_constant_pool_reference (vec);
3563 if (GET_MODE (vec) == mode)
3564 return vec;
3567 /* If we select elements in a vec_merge that all come from the same
3568 operand, select from that operand directly. */
3569 if (GET_CODE (op0) == VEC_MERGE)
3571 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3572 if (CONST_INT_P (trueop02))
3574 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3575 bool all_operand0 = true;
3576 bool all_operand1 = true;
3577 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3579 rtx j = XVECEXP (trueop1, 0, i);
3580 if (sel & (1 << UINTVAL (j)))
3581 all_operand1 = false;
3582 else
3583 all_operand0 = false;
3585 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3586 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3587 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3588 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3592 /* If we have two nested selects that are inverses of each
3593 other, replace them with the source operand. */
3594 if (GET_CODE (trueop0) == VEC_SELECT
3595 && GET_MODE (XEXP (trueop0, 0)) == mode)
3597 rtx op0_subop1 = XEXP (trueop0, 1);
3598 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3599 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3601 /* Apply the outer ordering vector to the inner one. (The inner
3602 ordering vector is expressly permitted to be of a different
3603 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3604 then the two VEC_SELECTs cancel. */
3605 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3607 rtx x = XVECEXP (trueop1, 0, i);
3608 if (!CONST_INT_P (x))
3609 return 0;
3610 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3611 if (!CONST_INT_P (y) || i != INTVAL (y))
3612 return 0;
3614 return XEXP (trueop0, 0);
3617 return 0;
3618 case VEC_CONCAT:
3620 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3621 ? GET_MODE (trueop0)
3622 : GET_MODE_INNER (mode));
3623 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3624 ? GET_MODE (trueop1)
3625 : GET_MODE_INNER (mode));
3627 gcc_assert (VECTOR_MODE_P (mode));
3628 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3629 == GET_MODE_SIZE (mode));
3631 if (VECTOR_MODE_P (op0_mode))
3632 gcc_assert (GET_MODE_INNER (mode)
3633 == GET_MODE_INNER (op0_mode));
3634 else
3635 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3637 if (VECTOR_MODE_P (op1_mode))
3638 gcc_assert (GET_MODE_INNER (mode)
3639 == GET_MODE_INNER (op1_mode));
3640 else
3641 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3643 if ((GET_CODE (trueop0) == CONST_VECTOR
3644 || CONST_SCALAR_INT_P (trueop0)
3645 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3646 && (GET_CODE (trueop1) == CONST_VECTOR
3647 || CONST_SCALAR_INT_P (trueop1)
3648 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3650 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3651 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3652 rtvec v = rtvec_alloc (n_elts);
3653 unsigned int i;
3654 unsigned in_n_elts = 1;
3656 if (VECTOR_MODE_P (op0_mode))
3657 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3658 for (i = 0; i < n_elts; i++)
3660 if (i < in_n_elts)
3662 if (!VECTOR_MODE_P (op0_mode))
3663 RTVEC_ELT (v, i) = trueop0;
3664 else
3665 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3667 else
3669 if (!VECTOR_MODE_P (op1_mode))
3670 RTVEC_ELT (v, i) = trueop1;
3671 else
3672 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3673 i - in_n_elts);
3677 return gen_rtx_CONST_VECTOR (mode, v);
3680 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3681 Restrict the transformation to avoid generating a VEC_SELECT with a
3682 mode unrelated to its operand. */
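/* For instance, (vec_concat (vec_select x (parallel [0 1]))
   (vec_select x (parallel [2 3]))), where x has mode MODE, becomes
   (vec_select x (parallel [0 1 2 3])): the two selection PARALLELs are
   simply concatenated below.  */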
3683 if (GET_CODE (trueop0) == VEC_SELECT
3684 && GET_CODE (trueop1) == VEC_SELECT
3685 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3686 && GET_MODE (XEXP (trueop0, 0)) == mode)
3688 rtx par0 = XEXP (trueop0, 1);
3689 rtx par1 = XEXP (trueop1, 1);
3690 int len0 = XVECLEN (par0, 0);
3691 int len1 = XVECLEN (par1, 0);
3692 rtvec vec = rtvec_alloc (len0 + len1);
3693 for (int i = 0; i < len0; i++)
3694 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3695 for (int i = 0; i < len1; i++)
3696 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3697 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3698 gen_rtx_PARALLEL (VOIDmode, vec));
3701 return 0;
3703 default:
3704 gcc_unreachable ();
3707 return 0;
3711 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3712 rtx op0, rtx op1)
3714 unsigned int width = GET_MODE_PRECISION (mode);
3716 if (VECTOR_MODE_P (mode)
3717 && code != VEC_CONCAT
3718 && GET_CODE (op0) == CONST_VECTOR
3719 && GET_CODE (op1) == CONST_VECTOR)
3721 unsigned n_elts = GET_MODE_NUNITS (mode);
3722 machine_mode op0mode = GET_MODE (op0);
3723 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3724 machine_mode op1mode = GET_MODE (op1);
3725 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3726 rtvec v = rtvec_alloc (n_elts);
3727 unsigned int i;
3729 gcc_assert (op0_n_elts == n_elts);
3730 gcc_assert (op1_n_elts == n_elts);
3731 for (i = 0; i < n_elts; i++)
3733 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3734 CONST_VECTOR_ELT (op0, i),
3735 CONST_VECTOR_ELT (op1, i));
3736 if (!x)
3737 return 0;
3738 RTVEC_ELT (v, i) = x;
3741 return gen_rtx_CONST_VECTOR (mode, v);
3744 if (VECTOR_MODE_P (mode)
3745 && code == VEC_CONCAT
3746 && (CONST_SCALAR_INT_P (op0)
3747 || GET_CODE (op0) == CONST_FIXED
3748 || CONST_DOUBLE_AS_FLOAT_P (op0))
3749 && (CONST_SCALAR_INT_P (op1)
3750 || CONST_DOUBLE_AS_FLOAT_P (op1)
3751 || GET_CODE (op1) == CONST_FIXED))
3753 unsigned n_elts = GET_MODE_NUNITS (mode);
3754 rtvec v = rtvec_alloc (n_elts);
3756 gcc_assert (n_elts >= 2);
3757 if (n_elts == 2)
3759 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3760 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3762 RTVEC_ELT (v, 0) = op0;
3763 RTVEC_ELT (v, 1) = op1;
3765 else
3767 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3768 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3769 unsigned i;
3771 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3772 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3773 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3775 for (i = 0; i < op0_n_elts; ++i)
3776 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3777 for (i = 0; i < op1_n_elts; ++i)
3778 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3781 return gen_rtx_CONST_VECTOR (mode, v);
3784 if (SCALAR_FLOAT_MODE_P (mode)
3785 && CONST_DOUBLE_AS_FLOAT_P (op0)
3786 && CONST_DOUBLE_AS_FLOAT_P (op1)
3787 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3789 if (code == AND
3790 || code == IOR
3791 || code == XOR)
3793 long tmp0[4];
3794 long tmp1[4];
3795 REAL_VALUE_TYPE r;
3796 int i;
3798 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3799 GET_MODE (op0));
3800 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3801 GET_MODE (op1));
3802 for (i = 0; i < 4; i++)
3804 switch (code)
3806 case AND:
3807 tmp0[i] &= tmp1[i];
3808 break;
3809 case IOR:
3810 tmp0[i] |= tmp1[i];
3811 break;
3812 case XOR:
3813 tmp0[i] ^= tmp1[i];
3814 break;
3815 default:
3816 gcc_unreachable ();
3819 real_from_target (&r, tmp0, mode);
3820 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3822 else
3824 REAL_VALUE_TYPE f0, f1, value, result;
3825 bool inexact;
3827 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3828 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3829 real_convert (&f0, mode, &f0);
3830 real_convert (&f1, mode, &f1);
3832 if (HONOR_SNANS (mode)
3833 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3834 return 0;
3836 if (code == DIV
3837 && REAL_VALUES_EQUAL (f1, dconst0)
3838 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3839 return 0;
3841 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3842 && flag_trapping_math
3843 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3845 int s0 = REAL_VALUE_NEGATIVE (f0);
3846 int s1 = REAL_VALUE_NEGATIVE (f1);
3848 switch (code)
3850 case PLUS:
3851 /* Inf + -Inf = NaN plus exception. */
3852 if (s0 != s1)
3853 return 0;
3854 break;
3855 case MINUS:
3856 /* Inf - Inf = NaN plus exception. */
3857 if (s0 == s1)
3858 return 0;
3859 break;
3860 case DIV:
3861 /* Inf / Inf = NaN plus exception. */
3862 return 0;
3863 default:
3864 break;
3868 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3869 && flag_trapping_math
3870 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3871 || (REAL_VALUE_ISINF (f1)
3872 && REAL_VALUES_EQUAL (f0, dconst0))))
3873 /* Inf * 0 = NaN plus exception. */
3874 return 0;
3876 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3877 &f0, &f1);
3878 real_convert (&result, mode, &value);
3880 /* Don't constant fold this floating point operation if
3881 the result has overflowed and flag_trapping_math is set. */
3883 if (flag_trapping_math
3884 && MODE_HAS_INFINITIES (mode)
3885 && REAL_VALUE_ISINF (result)
3886 && !REAL_VALUE_ISINF (f0)
3887 && !REAL_VALUE_ISINF (f1))
3888 /* Overflow plus exception. */
3889 return 0;
3891 /* Don't constant fold this floating point operation if the
3892 result may depend upon the run-time rounding mode and
3893 flag_rounding_math is set, or if GCC's software emulation
3894 is unable to accurately represent the result. */
3896 if ((flag_rounding_math
3897 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3898 && (inexact || !real_identical (&result, &value)))
3899 return NULL_RTX;
3901 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3905 /* We can fold some multi-word operations. */
3906 if ((GET_MODE_CLASS (mode) == MODE_INT
3907 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3908 && CONST_SCALAR_INT_P (op0)
3909 && CONST_SCALAR_INT_P (op1))
3911 wide_int result;
3912 bool overflow;
3913 rtx_mode_t pop0 = std::make_pair (op0, mode);
3914 rtx_mode_t pop1 = std::make_pair (op1, mode);
3916 #if TARGET_SUPPORTS_WIDE_INT == 0
3917 /* This assert keeps the simplification from producing a result
3918 that cannot be represented in a CONST_DOUBLE but a lot of
3919 upstream callers expect that this function never fails to
3920 simplify something, and so if you added this check to the test
3921 above, the code would die later anyway. If this assert
3922 happens, you just need to make the port support wide int. */
3923 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3924 #endif
3925 switch (code)
3927 case MINUS:
3928 result = wi::sub (pop0, pop1);
3929 break;
3931 case PLUS:
3932 result = wi::add (pop0, pop1);
3933 break;
3935 case MULT:
3936 result = wi::mul (pop0, pop1);
3937 break;
3939 case DIV:
3940 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3941 if (overflow)
3942 return NULL_RTX;
3943 break;
3945 case MOD:
3946 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3947 if (overflow)
3948 return NULL_RTX;
3949 break;
3951 case UDIV:
3952 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3953 if (overflow)
3954 return NULL_RTX;
3955 break;
3957 case UMOD:
3958 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3959 if (overflow)
3960 return NULL_RTX;
3961 break;
3963 case AND:
3964 result = wi::bit_and (pop0, pop1);
3965 break;
3967 case IOR:
3968 result = wi::bit_or (pop0, pop1);
3969 break;
3971 case XOR:
3972 result = wi::bit_xor (pop0, pop1);
3973 break;
3975 case SMIN:
3976 result = wi::smin (pop0, pop1);
3977 break;
3979 case SMAX:
3980 result = wi::smax (pop0, pop1);
3981 break;
3983 case UMIN:
3984 result = wi::umin (pop0, pop1);
3985 break;
3987 case UMAX:
3988 result = wi::umax (pop0, pop1);
3989 break;
3991 case LSHIFTRT:
3992 case ASHIFTRT:
3993 case ASHIFT:
3995 wide_int wop1 = pop1;
3996 if (SHIFT_COUNT_TRUNCATED)
3997 wop1 = wi::umod_trunc (wop1, width);
3998 else if (wi::geu_p (wop1, width))
3999 return NULL_RTX;
4001 switch (code)
4003 case LSHIFTRT:
4004 result = wi::lrshift (pop0, wop1);
4005 break;
4007 case ASHIFTRT:
4008 result = wi::arshift (pop0, wop1);
4009 break;
4011 case ASHIFT:
4012 result = wi::lshift (pop0, wop1);
4013 break;
4015 default:
4016 gcc_unreachable ();
4018 break;
4020 case ROTATE:
4021 case ROTATERT:
4023 if (wi::neg_p (pop1))
4024 return NULL_RTX;
4026 switch (code)
4028 case ROTATE:
4029 result = wi::lrotate (pop0, pop1);
4030 break;
4032 case ROTATERT:
4033 result = wi::rrotate (pop0, pop1);
4034 break;
4036 default:
4037 gcc_unreachable ();
4039 break;
4041 default:
4042 return NULL_RTX;
4044 return immed_wide_int_const (result, mode);
4047 return NULL_RTX;
4052 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4053 PLUS or MINUS.
4055 Rather than test for specific cases, we do this by a brute-force method
4056 and do all possible simplifications until no more changes occur. Then
4057 we rebuild the operation. */
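/* As a sketch of the process: for (minus (plus A B) (minus C D)) the
   expansion loop below flattens the operands into the signed list
   { +A, +B, -C, +D }, the pairwise loop simplifies whatever pairs it
   can, and the final loop rebuilds a PLUS/MINUS chain from the
   surviving entries.  */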
4059 struct simplify_plus_minus_op_data
4061 rtx op;
4062 short neg;
4065 static bool
4066 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4068 int result;
4070 result = (commutative_operand_precedence (y)
4071 - commutative_operand_precedence (x));
4072 if (result)
4073 return result > 0;
4075 /* Group together equal REGs to do more simplification. */
4076 if (REG_P (x) && REG_P (y))
4077 return REGNO (x) > REGNO (y);
4078 else
4079 return false;
4082 static rtx
4083 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4084 rtx op1)
4086 struct simplify_plus_minus_op_data ops[16];
4087 rtx result, tem;
4088 int n_ops = 2;
4089 int changed, n_constants, canonicalized = 0;
4090 int i, j;
4092 memset (ops, 0, sizeof ops);
4094 /* Set up the two operands and then expand them until nothing has been
4095 changed. If we run out of room in our array, give up; this should
4096 almost never happen. */
4098 ops[0].op = op0;
4099 ops[0].neg = 0;
4100 ops[1].op = op1;
4101 ops[1].neg = (code == MINUS);
4105 changed = 0;
4106 n_constants = 0;
4108 for (i = 0; i < n_ops; i++)
4110 rtx this_op = ops[i].op;
4111 int this_neg = ops[i].neg;
4112 enum rtx_code this_code = GET_CODE (this_op);
4114 switch (this_code)
4116 case PLUS:
4117 case MINUS:
4118 if (n_ops == ARRAY_SIZE (ops))
4119 return NULL_RTX;
4121 ops[n_ops].op = XEXP (this_op, 1);
4122 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4123 n_ops++;
4125 ops[i].op = XEXP (this_op, 0);
4126 changed = 1;
4127 canonicalized |= this_neg || i != n_ops - 2;
4128 break;
4130 case NEG:
4131 ops[i].op = XEXP (this_op, 0);
4132 ops[i].neg = ! this_neg;
4133 changed = 1;
4134 canonicalized = 1;
4135 break;
4137 case CONST:
4138 if (n_ops != ARRAY_SIZE (ops)
4139 && GET_CODE (XEXP (this_op, 0)) == PLUS
4140 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4141 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4143 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4144 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4145 ops[n_ops].neg = this_neg;
4146 n_ops++;
4147 changed = 1;
4148 canonicalized = 1;
4150 break;
4152 case NOT:
4153 /* ~a -> (-a - 1) */
4154 if (n_ops != ARRAY_SIZE (ops))
4156 ops[n_ops].op = CONSTM1_RTX (mode);
4157 ops[n_ops++].neg = this_neg;
4158 ops[i].op = XEXP (this_op, 0);
4159 ops[i].neg = !this_neg;
4160 changed = 1;
4161 canonicalized = 1;
4163 break;
4165 case CONST_INT:
4166 n_constants++;
4167 if (this_neg)
4169 ops[i].op = neg_const_int (mode, this_op);
4170 ops[i].neg = 0;
4171 changed = 1;
4172 canonicalized = 1;
4174 break;
4176 default:
4177 break;
4181 while (changed);
4183 if (n_constants > 1)
4184 canonicalized = 1;
4186 gcc_assert (n_ops >= 2);
4188 /* If we only have two operands, we can avoid the loops. */
4189 if (n_ops == 2)
4191 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4192 rtx lhs, rhs;
4194 /* Get the two operands. Be careful with the order, especially for
4195 the cases where code == MINUS. */
4196 if (ops[0].neg && ops[1].neg)
4198 lhs = gen_rtx_NEG (mode, ops[0].op);
4199 rhs = ops[1].op;
4201 else if (ops[0].neg)
4203 lhs = ops[1].op;
4204 rhs = ops[0].op;
4206 else
4208 lhs = ops[0].op;
4209 rhs = ops[1].op;
4212 return simplify_const_binary_operation (code, mode, lhs, rhs);
4215 /* Now simplify each pair of operands until nothing changes. */
4218 /* Insertion sort is good enough for a small array. */
4219 for (i = 1; i < n_ops; i++)
4221 struct simplify_plus_minus_op_data save;
4222 j = i - 1;
4223 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4224 continue;
4226 canonicalized = 1;
4227 save = ops[i];
4229 ops[j + 1] = ops[j];
4230 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4231 ops[j + 1] = save;
4234 changed = 0;
4235 for (i = n_ops - 1; i > 0; i--)
4236 for (j = i - 1; j >= 0; j--)
4238 rtx lhs = ops[j].op, rhs = ops[i].op;
4239 int lneg = ops[j].neg, rneg = ops[i].neg;
4241 if (lhs != 0 && rhs != 0)
4243 enum rtx_code ncode = PLUS;
4245 if (lneg != rneg)
4247 ncode = MINUS;
4248 if (lneg)
4249 std::swap (lhs, rhs);
4251 else if (swap_commutative_operands_p (lhs, rhs))
4252 std::swap (lhs, rhs);
4254 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4255 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4257 rtx tem_lhs, tem_rhs;
4259 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4260 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4261 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4263 if (tem && !CONSTANT_P (tem))
4264 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4266 else
4267 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4269 if (tem)
4271 /* Reject "simplifications" that just wrap the two
4272 arguments in a CONST. Failure to do so can result
4273 in infinite recursion with simplify_binary_operation
4274 when it calls us to simplify CONST operations.
4275 Also, if we find such a simplification, don't try
4276 any more combinations with this rhs: We must have
4277 something like symbol+offset, i.e. one of the
4278 trivial CONST expressions we handle later. */
4279 if (GET_CODE (tem) == CONST
4280 && GET_CODE (XEXP (tem, 0)) == ncode
4281 && XEXP (XEXP (tem, 0), 0) == lhs
4282 && XEXP (XEXP (tem, 0), 1) == rhs)
4283 break;
4284 lneg &= rneg;
4285 if (GET_CODE (tem) == NEG)
4286 tem = XEXP (tem, 0), lneg = !lneg;
4287 if (CONST_INT_P (tem) && lneg)
4288 tem = neg_const_int (mode, tem), lneg = 0;
4290 ops[i].op = tem;
4291 ops[i].neg = lneg;
4292 ops[j].op = NULL_RTX;
4293 changed = 1;
4294 canonicalized = 1;
4299 /* If nothing changed, fail. */
4300 if (!canonicalized)
4301 return NULL_RTX;
4303 /* Pack all the operands to the lower-numbered entries. */
4304 for (i = 0, j = 0; j < n_ops; j++)
4305 if (ops[j].op)
4307 ops[i] = ops[j];
4308 i++;
4310 n_ops = i;
4312 while (changed);
4314 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4315 if (n_ops == 2
4316 && CONST_INT_P (ops[1].op)
4317 && CONSTANT_P (ops[0].op)
4318 && ops[0].neg)
4319 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4321 /* We suppressed creation of trivial CONST expressions in the
4322 combination loop to avoid recursion. Create one manually now.
4323 The combination loop should have ensured that there is exactly
4324 one CONST_INT, and the sort will have ensured that it is last
4325 in the array and that any other constant will be next-to-last. */
4327 if (n_ops > 1
4328 && CONST_INT_P (ops[n_ops - 1].op)
4329 && CONSTANT_P (ops[n_ops - 2].op))
4331 rtx value = ops[n_ops - 1].op;
4332 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4333 value = neg_const_int (mode, value);
4334 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4335 INTVAL (value));
4336 n_ops--;
4339 /* Put a non-negated operand first, if possible. */
4341 for (i = 0; i < n_ops && ops[i].neg; i++)
4342 continue;
4343 if (i == n_ops)
4344 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4345 else if (i != 0)
4347 tem = ops[0].op;
4348 ops[0] = ops[i];
4349 ops[i].op = tem;
4350 ops[i].neg = 1;
4353 /* Now make the result by performing the requested operations. */
4354 result = ops[0].op;
4355 for (i = 1; i < n_ops; i++)
4356 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4357 mode, result, ops[i].op);
4359 return result;
4362 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4363 static bool
4364 plus_minus_operand_p (const_rtx x)
4366 return GET_CODE (x) == PLUS
4367 || GET_CODE (x) == MINUS
4368 || (GET_CODE (x) == CONST
4369 && GET_CODE (XEXP (x, 0)) == PLUS
4370 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4371 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4374 /* Like simplify_binary_operation except used for relational operators.
4375 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4376 not both be VOIDmode.
4378 CMP_MODE specifies the mode in which the comparison is done, so it is
4379 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4380 the operands or, if both are VOIDmode, the operands are compared in
4381 "infinite precision". */
4383 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4384 machine_mode cmp_mode, rtx op0, rtx op1)
4386 rtx tem, trueop0, trueop1;
4388 if (cmp_mode == VOIDmode)
4389 cmp_mode = GET_MODE (op0);
4390 if (cmp_mode == VOIDmode)
4391 cmp_mode = GET_MODE (op1);
4393 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4394 if (tem)
4396 if (SCALAR_FLOAT_MODE_P (mode))
4398 if (tem == const0_rtx)
4399 return CONST0_RTX (mode);
4400 #ifdef FLOAT_STORE_FLAG_VALUE
4402 REAL_VALUE_TYPE val;
4403 val = FLOAT_STORE_FLAG_VALUE (mode);
4404 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4406 #else
4407 return NULL_RTX;
4408 #endif
4410 if (VECTOR_MODE_P (mode))
4412 if (tem == const0_rtx)
4413 return CONST0_RTX (mode);
4414 #ifdef VECTOR_STORE_FLAG_VALUE
4416 int i, units;
4417 rtvec v;
4419 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4420 if (val == NULL_RTX)
4421 return NULL_RTX;
4422 if (val == const1_rtx)
4423 return CONST1_RTX (mode);
4425 units = GET_MODE_NUNITS (mode);
4426 v = rtvec_alloc (units);
4427 for (i = 0; i < units; i++)
4428 RTVEC_ELT (v, i) = val;
4429 return gen_rtx_raw_CONST_VECTOR (mode, v);
4431 #else
4432 return NULL_RTX;
4433 #endif
4436 return tem;
4439 /* For the following tests, ensure const0_rtx is op1. */
4440 if (swap_commutative_operands_p (op0, op1)
4441 || (op0 == const0_rtx && op1 != const0_rtx))
4442 std::swap (op0, op1), code = swap_condition (code);
4444 /* If op0 is a compare, extract the comparison arguments from it. */
4445 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4446 return simplify_gen_relational (code, mode, VOIDmode,
4447 XEXP (op0, 0), XEXP (op0, 1));
4449 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4450 || CC0_P (op0))
4451 return NULL_RTX;
4453 trueop0 = avoid_constant_pool_reference (op0);
4454 trueop1 = avoid_constant_pool_reference (op1);
4455 return simplify_relational_operation_1 (code, mode, cmp_mode,
4456 trueop0, trueop1);
4459 /* This part of simplify_relational_operation is only used when CMP_MODE
4460 is not in class MODE_CC (i.e. it is a real comparison).
4462 MODE is the mode of the result, while CMP_MODE specifies the mode
4463 in which the comparison is done, so it is the mode of the operands. */
4465 static rtx
4466 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4467 machine_mode cmp_mode, rtx op0, rtx op1)
4469 enum rtx_code op0code = GET_CODE (op0);
4471 if (op1 == const0_rtx && COMPARISON_P (op0))
4473 /* If op0 is a comparison, extract the comparison arguments
4474 from it. */
4475 if (code == NE)
4477 if (GET_MODE (op0) == mode)
4478 return simplify_rtx (op0);
4479 else
4480 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4481 XEXP (op0, 0), XEXP (op0, 1));
4483 else if (code == EQ)
4485 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4486 if (new_code != UNKNOWN)
4487 return simplify_gen_relational (new_code, mode, VOIDmode,
4488 XEXP (op0, 0), XEXP (op0, 1));
4492 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4493 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
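/* For example, (ltu (plus a (const_int 4)) (const_int 4)) becomes
   (geu a (const_int -4)), i.e. the usual unsigned overflow test for
   a + 4.  */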
4494 if ((code == LTU || code == GEU)
4495 && GET_CODE (op0) == PLUS
4496 && CONST_INT_P (XEXP (op0, 1))
4497 && (rtx_equal_p (op1, XEXP (op0, 0))
4498 || rtx_equal_p (op1, XEXP (op0, 1)))
4499 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4500 && XEXP (op0, 1) != const0_rtx)
4502 rtx new_cmp
4503 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4504 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4505 cmp_mode, XEXP (op0, 0), new_cmp);
4508 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4509 if ((code == LTU || code == GEU)
4510 && GET_CODE (op0) == PLUS
4511 && rtx_equal_p (op1, XEXP (op0, 1))
4512 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4513 && !rtx_equal_p (op1, XEXP (op0, 0)))
4514 return simplify_gen_relational (code, mode, cmp_mode, op0,
4515 copy_rtx (XEXP (op0, 0)));
4517 if (op1 == const0_rtx)
4519 /* Canonicalize (GTU x 0) as (NE x 0). */
4520 if (code == GTU)
4521 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4522 /* Canonicalize (LEU x 0) as (EQ x 0). */
4523 if (code == LEU)
4524 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4526 else if (op1 == const1_rtx)
4528 switch (code)
4530 case GE:
4531 /* Canonicalize (GE x 1) as (GT x 0). */
4532 return simplify_gen_relational (GT, mode, cmp_mode,
4533 op0, const0_rtx);
4534 case GEU:
4535 /* Canonicalize (GEU x 1) as (NE x 0). */
4536 return simplify_gen_relational (NE, mode, cmp_mode,
4537 op0, const0_rtx);
4538 case LT:
4539 /* Canonicalize (LT x 1) as (LE x 0). */
4540 return simplify_gen_relational (LE, mode, cmp_mode,
4541 op0, const0_rtx);
4542 case LTU:
4543 /* Canonicalize (LTU x 1) as (EQ x 0). */
4544 return simplify_gen_relational (EQ, mode, cmp_mode,
4545 op0, const0_rtx);
4546 default:
4547 break;
4550 else if (op1 == constm1_rtx)
4552 /* Canonicalize (LE x -1) as (LT x 0). */
4553 if (code == LE)
4554 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4555 /* Canonicalize (GT x -1) as (GE x 0). */
4556 if (code == GT)
4557 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4560 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4561 if ((code == EQ || code == NE)
4562 && (op0code == PLUS || op0code == MINUS)
4563 && CONSTANT_P (op1)
4564 && CONSTANT_P (XEXP (op0, 1))
4565 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4567 rtx x = XEXP (op0, 0);
4568 rtx c = XEXP (op0, 1);
4569 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4570 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4572 /* Detect an infinitely recursive condition, where we oscillate at this
4573 simplification case between:
4574 A + B == C <---> C - B == A,
4575 where A, B, and C are all constants with non-simplifiable expressions,
4576 usually SYMBOL_REFs. */
4577 if (GET_CODE (tem) == invcode
4578 && CONSTANT_P (x)
4579 && rtx_equal_p (c, XEXP (tem, 1)))
4580 return NULL_RTX;
4582 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4585 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4586 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4587 if (code == NE
4588 && op1 == const0_rtx
4589 && GET_MODE_CLASS (mode) == MODE_INT
4590 && cmp_mode != VOIDmode
4591 /* ??? Work-around BImode bugs in the ia64 backend. */
4592 && mode != BImode
4593 && cmp_mode != BImode
4594 && nonzero_bits (op0, cmp_mode) == 1
4595 && STORE_FLAG_VALUE == 1)
4596 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4597 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4598 : lowpart_subreg (mode, op0, cmp_mode);
4600 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4601 if ((code == EQ || code == NE)
4602 && op1 == const0_rtx
4603 && op0code == XOR)
4604 return simplify_gen_relational (code, mode, cmp_mode,
4605 XEXP (op0, 0), XEXP (op0, 1));
4607 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4608 if ((code == EQ || code == NE)
4609 && op0code == XOR
4610 && rtx_equal_p (XEXP (op0, 0), op1)
4611 && !side_effects_p (XEXP (op0, 0)))
4612 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4613 CONST0_RTX (mode));
4615 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4616 if ((code == EQ || code == NE)
4617 && op0code == XOR
4618 && rtx_equal_p (XEXP (op0, 1), op1)
4619 && !side_effects_p (XEXP (op0, 1)))
4620 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4621 CONST0_RTX (mode));
4623 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4624 if ((code == EQ || code == NE)
4625 && op0code == XOR
4626 && CONST_SCALAR_INT_P (op1)
4627 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4628 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4629 simplify_gen_binary (XOR, cmp_mode,
4630 XEXP (op0, 1), op1));
4632 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4633 can be implemented with a BICS instruction on some targets, or
4634 constant-folded if y is a constant. */
4635 if ((code == EQ || code == NE)
4636 && op0code == AND
4637 && rtx_equal_p (XEXP (op0, 0), op1)
4638 && !side_effects_p (op1)
4639 && op1 != CONST0_RTX (cmp_mode))
4641 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4642 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4644 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4645 CONST0_RTX (cmp_mode));
4648 /* Likewise for (eq/ne (and x y) y). */
4649 if ((code == EQ || code == NE)
4650 && op0code == AND
4651 && rtx_equal_p (XEXP (op0, 1), op1)
4652 && !side_effects_p (op1)
4653 && op1 != CONST0_RTX (cmp_mode))
4655 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4656 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4658 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4659 CONST0_RTX (cmp_mode));
4662 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
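/* E.g. (eq (bswap:SI x) (const_int 0x12345678)) becomes
   (eq x (const_int 0x78563412)), folding the byte swap into the
   constant.  */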
4663 if ((code == EQ || code == NE)
4664 && GET_CODE (op0) == BSWAP
4665 && CONST_SCALAR_INT_P (op1))
4666 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4667 simplify_gen_unary (BSWAP, cmp_mode,
4668 op1, cmp_mode));
4670 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4671 if ((code == EQ || code == NE)
4672 && GET_CODE (op0) == BSWAP
4673 && GET_CODE (op1) == BSWAP)
4674 return simplify_gen_relational (code, mode, cmp_mode,
4675 XEXP (op0, 0), XEXP (op1, 0));
4677 if (op0code == POPCOUNT && op1 == const0_rtx)
4678 switch (code)
4680 case EQ:
4681 case LE:
4682 case LEU:
4683 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4684 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4685 XEXP (op0, 0), const0_rtx);
4687 case NE:
4688 case GT:
4689 case GTU:
4690 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4691 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4692 XEXP (op0, 0), const0_rtx);
4694 default:
4695 break;
4698 return NULL_RTX;
4701 enum
4703 CMP_EQ = 1,
4704 CMP_LT = 2,
4705 CMP_GT = 4,
4706 CMP_LTU = 8,
4707 CMP_GTU = 16
4711 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4712 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4713 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4714 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4715 For floating-point comparisons, assume that the operands were ordered. */
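/* For instance, a KNOWN_RESULT of (CMP_LT | CMP_LTU) means op0 is below
   op1 both as a signed and as an unsigned value, so LT, LE, LTU and LEU
   map to const_true_rtx while EQ, GT, GE, GTU and GEU map to
   const0_rtx.  */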
4717 static rtx
4718 comparison_result (enum rtx_code code, int known_results)
4720 switch (code)
4722 case EQ:
4723 case UNEQ:
4724 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4725 case NE:
4726 case LTGT:
4727 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4729 case LT:
4730 case UNLT:
4731 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4732 case GE:
4733 case UNGE:
4734 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4736 case GT:
4737 case UNGT:
4738 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4739 case LE:
4740 case UNLE:
4741 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4743 case LTU:
4744 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4745 case GEU:
4746 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4748 case GTU:
4749 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4750 case LEU:
4751 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4753 case ORDERED:
4754 return const_true_rtx;
4755 case UNORDERED:
4756 return const0_rtx;
4757 default:
4758 gcc_unreachable ();
4762 /* Check if the given comparison (done in the given MODE) is actually
4763 a tautology or a contradiction. If the mode is VOIDmode, the
4764 comparison is done in "infinite precision". If no simplification
4765 is possible, this function returns zero. Otherwise, it returns
4766 either const_true_rtx or const0_rtx. */
4769 simplify_const_relational_operation (enum rtx_code code,
4770 machine_mode mode,
4771 rtx op0, rtx op1)
4773 rtx tem;
4774 rtx trueop0;
4775 rtx trueop1;
4777 gcc_assert (mode != VOIDmode
4778 || (GET_MODE (op0) == VOIDmode
4779 && GET_MODE (op1) == VOIDmode));
4781 /* If op0 is a compare, extract the comparison arguments from it. */
4782 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4784 op1 = XEXP (op0, 1);
4785 op0 = XEXP (op0, 0);
4787 if (GET_MODE (op0) != VOIDmode)
4788 mode = GET_MODE (op0);
4789 else if (GET_MODE (op1) != VOIDmode)
4790 mode = GET_MODE (op1);
4791 else
4792 return 0;
4795 /* We can't simplify MODE_CC values since we don't know what the
4796 actual comparison is. */
4797 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4798 return 0;
4800 /* Make sure the constant is second. */
4801 if (swap_commutative_operands_p (op0, op1))
4803 std::swap (op0, op1);
4804 code = swap_condition (code);
4807 trueop0 = avoid_constant_pool_reference (op0);
4808 trueop1 = avoid_constant_pool_reference (op1);
4810 /* For integer comparisons of A and B maybe we can simplify A - B and can
4811 then simplify a comparison of that with zero. If A and B are both either
4812 a register or a CONST_INT, this can't help; testing for these cases will
4813 prevent infinite recursion here and speed things up.
4815 We can only do this for EQ and NE comparisons as otherwise we may
4816 lose or introduce overflow which we cannot disregard as undefined,
4817 since we do not know the signedness of the operation on either the
4818 left or the right hand side of the comparison. */
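/* As an example, for (eq (plus x (const_int 5)) (plus x (const_int 9)))
   the subtraction folds to (const_int -4), and comparing that against
   zero yields const0_rtx.  */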
4820 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4821 && (code == EQ || code == NE)
4822 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4823 && (REG_P (op1) || CONST_INT_P (trueop1)))
4824 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4825 /* We cannot do this if tem is a nonzero address. */
4826 && ! nonzero_address_p (tem))
4827 return simplify_const_relational_operation (signed_condition (code),
4828 mode, tem, const0_rtx);
4830 if (! HONOR_NANS (mode) && code == ORDERED)
4831 return const_true_rtx;
4833 if (! HONOR_NANS (mode) && code == UNORDERED)
4834 return const0_rtx;
4836 /* For modes without NaNs, if the two operands are equal, we know the
4837 result except if they have side-effects. Even with NaNs we know
4838 the result of unordered comparisons and, if signaling NaNs are
4839 irrelevant, also the result of LT/GT/LTGT. */
4840 if ((! HONOR_NANS (trueop0)
4841 || code == UNEQ || code == UNLE || code == UNGE
4842 || ((code == LT || code == GT || code == LTGT)
4843 && ! HONOR_SNANS (trueop0)))
4844 && rtx_equal_p (trueop0, trueop1)
4845 && ! side_effects_p (trueop0))
4846 return comparison_result (code, CMP_EQ);
4848 /* If the operands are floating-point constants, see if we can fold
4849 the result. */
4850 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4851 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4852 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4854 REAL_VALUE_TYPE d0, d1;
4856 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4857 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4859 /* Comparisons are unordered iff at least one of the values is NaN. */
4860 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4861 switch (code)
4863 case UNEQ:
4864 case UNLT:
4865 case UNGT:
4866 case UNLE:
4867 case UNGE:
4868 case NE:
4869 case UNORDERED:
4870 return const_true_rtx;
4871 case EQ:
4872 case LT:
4873 case GT:
4874 case LE:
4875 case GE:
4876 case LTGT:
4877 case ORDERED:
4878 return const0_rtx;
4879 default:
4880 return 0;
4883 return comparison_result (code,
4884 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4885 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4888 /* Otherwise, see if the operands are both integers. */
4889 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4890 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4892 /* It would be nice if we really had a mode here. However, the
4893 largest int representable on the target is as good as
4894 infinite. */
4895 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4896 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4897 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4899 if (wi::eq_p (ptrueop0, ptrueop1))
4900 return comparison_result (code, CMP_EQ);
4901 else
4903 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4904 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4905 return comparison_result (code, cr);
4909 /* Optimize comparisons with upper and lower bounds. */
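/* As an illustration: if nonzero_bits shows that only the low eight
   bits of trueop0 can be set in a 32-bit mode, then mmin = 0 and
   mmax = 255 below, so (gtu trueop0 (const_int 300)) folds to
   const0_rtx and (leu trueop0 (const_int 255)) folds to
   const_true_rtx.  */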
4910 if (HWI_COMPUTABLE_MODE_P (mode)
4911 && CONST_INT_P (trueop1))
4913 int sign;
4914 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4915 HOST_WIDE_INT val = INTVAL (trueop1);
4916 HOST_WIDE_INT mmin, mmax;
4918 if (code == GEU
4919 || code == LEU
4920 || code == GTU
4921 || code == LTU)
4922 sign = 0;
4923 else
4924 sign = 1;
4926 /* Get a reduced range if the sign bit is zero. */
4927 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4929 mmin = 0;
4930 mmax = nonzero;
4932 else
4934 rtx mmin_rtx, mmax_rtx;
4935 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4937 mmin = INTVAL (mmin_rtx);
4938 mmax = INTVAL (mmax_rtx);
4939 if (sign)
4941 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4943 mmin >>= (sign_copies - 1);
4944 mmax >>= (sign_copies - 1);
4948 switch (code)
4950 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4951 case GEU:
4952 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4953 return const_true_rtx;
4954 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4955 return const0_rtx;
4956 break;
4957 case GE:
4958 if (val <= mmin)
4959 return const_true_rtx;
4960 if (val > mmax)
4961 return const0_rtx;
4962 break;
4964 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4965 case LEU:
4966 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4967 return const_true_rtx;
4968 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4969 return const0_rtx;
4970 break;
4971 case LE:
4972 if (val >= mmax)
4973 return const_true_rtx;
4974 if (val < mmin)
4975 return const0_rtx;
4976 break;
4978 case EQ:
4979 /* x == y is always false for y out of range. */
4980 if (val < mmin || val > mmax)
4981 return const0_rtx;
4982 break;
4984 /* x > y is always false for y >= mmax, always true for y < mmin. */
4985 case GTU:
4986 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4987 return const0_rtx;
4988 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4989 return const_true_rtx;
4990 break;
4991 case GT:
4992 if (val >= mmax)
4993 return const0_rtx;
4994 if (val < mmin)
4995 return const_true_rtx;
4996 break;
4998 /* x < y is always false for y <= mmin, always true for y > mmax. */
4999 case LTU:
5000 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5001 return const0_rtx;
5002 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5003 return const_true_rtx;
5004 break;
5005 case LT:
5006 if (val <= mmin)
5007 return const0_rtx;
5008 if (val > mmax)
5009 return const_true_rtx;
5010 break;
5012 case NE:
5013 /* x != y is always true for y out of range. */
5014 if (val < mmin || val > mmax)
5015 return const_true_rtx;
5016 break;
5018 default:
5019 break;
5023 /* Optimize integer comparisons with zero. */
5024 if (trueop1 == const0_rtx)
5026 /* Some addresses are known to be nonzero. We don't know
5027 their sign, but equality comparisons are known. */
5028 if (nonzero_address_p (trueop0))
5030 if (code == EQ || code == LEU)
5031 return const0_rtx;
5032 if (code == NE || code == GTU)
5033 return const_true_rtx;
5036 /* See if the first operand is an IOR with a constant. If so, we
5037 may be able to determine the result of this comparison. */
5038 if (GET_CODE (op0) == IOR)
5040 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5041 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5043 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5044 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5045 && (UINTVAL (inner_const)
5046 & ((unsigned HOST_WIDE_INT) 1
5047 << sign_bitnum)));
5049 switch (code)
5051 case EQ:
5052 case LEU:
5053 return const0_rtx;
5054 case NE:
5055 case GTU:
5056 return const_true_rtx;
5057 case LT:
5058 case LE:
5059 if (has_sign)
5060 return const_true_rtx;
5061 break;
5062 case GT:
5063 case GE:
5064 if (has_sign)
5065 return const0_rtx;
5066 break;
5067 default:
5068 break;
5074 /* Optimize comparison of ABS with zero. */
5075 if (trueop1 == CONST0_RTX (mode)
5076 && (GET_CODE (trueop0) == ABS
5077 || (GET_CODE (trueop0) == FLOAT_EXTEND
5078 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5080 switch (code)
5082 case LT:
5083 /* Optimize abs(x) < 0.0. */
5084 if (!HONOR_SNANS (mode)
5085 && (!INTEGRAL_MODE_P (mode)
5086 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5088 if (INTEGRAL_MODE_P (mode)
5089 && (issue_strict_overflow_warning
5090 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5091 warning (OPT_Wstrict_overflow,
5092 ("assuming signed overflow does not occur when "
5093 "assuming abs (x) < 0 is false"));
5094 return const0_rtx;
5096 break;
5098 case GE:
5099 /* Optimize abs(x) >= 0.0. */
5100 if (!HONOR_NANS (mode)
5101 && (!INTEGRAL_MODE_P (mode)
5102 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5104 if (INTEGRAL_MODE_P (mode)
5105 && (issue_strict_overflow_warning
5106 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5107 warning (OPT_Wstrict_overflow,
5108 ("assuming signed overflow does not occur when "
5109 "assuming abs (x) >= 0 is true"));
5110 return const_true_rtx;
5112 break;
5114 case UNGE:
5115 /* Optimize ! (abs(x) < 0.0). */
5116 return const_true_rtx;
5118 default:
5119 break;
5123 return 0;
5126 /* Simplify CODE, an operation with result mode MODE and three operands,
5127 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5128 a constant. Return 0 if no simplification is possible. */
5131 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5132 machine_mode op0_mode, rtx op0, rtx op1,
5133 rtx op2)
5135 unsigned int width = GET_MODE_PRECISION (mode);
5136 bool any_change = false;
5137 rtx tem, trueop2;
5139 /* VOIDmode means "infinite" precision. */
5140 if (width == 0)
5141 width = HOST_BITS_PER_WIDE_INT;
5143 switch (code)
5145 case FMA:
5146 /* Simplify negations around the multiplication. */
5147 /* -a * -b + c => a * b + c. */
5148 if (GET_CODE (op0) == NEG)
5150 tem = simplify_unary_operation (NEG, mode, op1, mode);
5151 if (tem)
5152 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5154 else if (GET_CODE (op1) == NEG)
5156 tem = simplify_unary_operation (NEG, mode, op0, mode);
5157 if (tem)
5158 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5161 /* Canonicalize the two multiplication operands. */
5162 /* a * -b + c => -b * a + c. */
5163 if (swap_commutative_operands_p (op0, op1))
5164 std::swap (op0, op1), any_change = true;
5166 if (any_change)
5167 return gen_rtx_FMA (mode, op0, op1, op2);
5168 return NULL_RTX;
5170 case SIGN_EXTRACT:
5171 case ZERO_EXTRACT:
5172 if (CONST_INT_P (op0)
5173 && CONST_INT_P (op1)
5174 && CONST_INT_P (op2)
5175 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5176 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5178 /* Extracting a bit-field from a constant */
5179 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5180 HOST_WIDE_INT op1val = INTVAL (op1);
5181 HOST_WIDE_INT op2val = INTVAL (op2);
5182 if (BITS_BIG_ENDIAN)
5183 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5184 else
5185 val >>= op2val;
5187 if (HOST_BITS_PER_WIDE_INT != op1val)
5189 /* First zero-extend. */
5190 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5191 /* If desired, propagate sign bit. */
5192 if (code == SIGN_EXTRACT
5193 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5194 != 0)
5195 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5198 return gen_int_mode (val, mode);
5200 break;
5202 case IF_THEN_ELSE:
5203 if (CONST_INT_P (op0))
5204 return op0 != const0_rtx ? op1 : op2;
5206 /* Convert c ? a : a into "a". */
5207 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5208 return op1;
5210 /* Convert a != b ? a : b into "a". */
5211 if (GET_CODE (op0) == NE
5212 && ! side_effects_p (op0)
5213 && ! HONOR_NANS (mode)
5214 && ! HONOR_SIGNED_ZEROS (mode)
5215 && ((rtx_equal_p (XEXP (op0, 0), op1)
5216 && rtx_equal_p (XEXP (op0, 1), op2))
5217 || (rtx_equal_p (XEXP (op0, 0), op2)
5218 && rtx_equal_p (XEXP (op0, 1), op1))))
5219 return op1;
5221 /* Convert a == b ? a : b into "b". */
5222 if (GET_CODE (op0) == EQ
5223 && ! side_effects_p (op0)
5224 && ! HONOR_NANS (mode)
5225 && ! HONOR_SIGNED_ZEROS (mode)
5226 && ((rtx_equal_p (XEXP (op0, 0), op1)
5227 && rtx_equal_p (XEXP (op0, 1), op2))
5228 || (rtx_equal_p (XEXP (op0, 0), op2)
5229 && rtx_equal_p (XEXP (op0, 1), op1))))
5230 return op2;
5232 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5234 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5235 ? GET_MODE (XEXP (op0, 1))
5236 : GET_MODE (XEXP (op0, 0)));
5237 rtx temp;
5239 /* Look for happy constants in op1 and op2. */
5240 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5242 HOST_WIDE_INT t = INTVAL (op1);
5243 HOST_WIDE_INT f = INTVAL (op2);
5245 if (t == STORE_FLAG_VALUE && f == 0)
5246 code = GET_CODE (op0);
5247 else if (t == 0 && f == STORE_FLAG_VALUE)
5249 enum rtx_code tmp;
5250 tmp = reversed_comparison_code (op0, NULL_RTX);
5251 if (tmp == UNKNOWN)
5252 break;
5253 code = tmp;
5255 else
5256 break;
5258 return simplify_gen_relational (code, mode, cmp_mode,
5259 XEXP (op0, 0), XEXP (op0, 1));
5262 if (cmp_mode == VOIDmode)
5263 cmp_mode = op0_mode;
5264 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5265 cmp_mode, XEXP (op0, 0),
5266 XEXP (op0, 1));
5268 /* See if any simplifications were possible. */
5269 if (temp)
5271 if (CONST_INT_P (temp))
5272 return temp == const0_rtx ? op2 : op1;
5273 else if (temp)
5274 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5277 break;
5279 case VEC_MERGE:
5280 gcc_assert (GET_MODE (op0) == mode);
5281 gcc_assert (GET_MODE (op1) == mode);
5282 gcc_assert (VECTOR_MODE_P (mode));
5283 trueop2 = avoid_constant_pool_reference (op2);
5284 if (CONST_INT_P (trueop2))
5286 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5287 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5288 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5289 unsigned HOST_WIDE_INT mask;
5290 if (n_elts == HOST_BITS_PER_WIDE_INT)
5291 mask = -1;
5292 else
5293 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5295 if (!(sel & mask) && !side_effects_p (op0))
5296 return op1;
5297 if ((sel & mask) == mask && !side_effects_p (op1))
5298 return op0;
5300 rtx trueop0 = avoid_constant_pool_reference (op0);
5301 rtx trueop1 = avoid_constant_pool_reference (op1);
5302 if (GET_CODE (trueop0) == CONST_VECTOR
5303 && GET_CODE (trueop1) == CONST_VECTOR)
5305 rtvec v = rtvec_alloc (n_elts);
5306 unsigned int i;
5308 for (i = 0; i < n_elts; i++)
5309 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5310 ? CONST_VECTOR_ELT (trueop0, i)
5311 : CONST_VECTOR_ELT (trueop1, i));
5312 return gen_rtx_CONST_VECTOR (mode, v);
5315 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5316 if no element from a appears in the result. */
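/* Illustration for four-element vectors: if the inner selector M only
   ever picks A for element 0 (M == 1) and the outer selector N never
   takes element 0 from the inner merge ((N & 1) == 0), then no element
   of A can reach the result, so (vec_merge (vec_merge a b m) c n)
   becomes (vec_merge b c n).  */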
5317 if (GET_CODE (op0) == VEC_MERGE)
5319 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5320 if (CONST_INT_P (tem))
5322 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5323 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5324 return simplify_gen_ternary (code, mode, mode,
5325 XEXP (op0, 1), op1, op2);
5326 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5327 return simplify_gen_ternary (code, mode, mode,
5328 XEXP (op0, 0), op1, op2);
5331 if (GET_CODE (op1) == VEC_MERGE)
5333 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5334 if (CONST_INT_P (tem))
5336 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5337 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5338 return simplify_gen_ternary (code, mode, mode,
5339 op0, XEXP (op1, 1), op2);
5340 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5341 return simplify_gen_ternary (code, mode, mode,
5342 op0, XEXP (op1, 0), op2);
5346 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5347 with a. */
5348 if (GET_CODE (op0) == VEC_DUPLICATE
5349 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5350 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5351 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5353 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5354 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5356 if (XEXP (XEXP (op0, 0), 0) == op1
5357 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5358 return op1;
5363 if (rtx_equal_p (op0, op1)
5364 && !side_effects_p (op2) && !side_effects_p (op1))
5365 return op0;
5367 break;
5369 default:
5370 gcc_unreachable ();
5373 return 0;
5376 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5377 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5378 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5380 Works by unpacking OP into a collection of 8-bit values
5381 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5382 and then repacking them again for OUTERMODE. */
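/* A worked example, assuming 8-bit bytes and a little-endian target:
   a SUBREG:HI of the SImode constant 0x12345678 at byte 2 unpacks the
   value into the little-endian byte array { 0x78, 0x56, 0x34, 0x12 },
   selects the two bytes starting at offset 2 and repacks them as the
   HImode constant 0x1234.  */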
5384 static rtx
5385 simplify_immed_subreg (machine_mode outermode, rtx op,
5386 machine_mode innermode, unsigned int byte)
5388 enum {
5389 value_bit = 8,
5390 value_mask = (1 << value_bit) - 1
5392 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5393 int value_start;
5394 int i;
5395 int elem;
5397 int num_elem;
5398 rtx * elems;
5399 int elem_bitsize;
5400 rtx result_s;
5401 rtvec result_v = NULL;
5402 enum mode_class outer_class;
5403 machine_mode outer_submode;
5404 int max_bitsize;
5406 /* Some ports misuse CCmode. */
5407 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5408 return op;
5410 /* We have no way to represent a complex constant at the rtl level. */
5411 if (COMPLEX_MODE_P (outermode))
5412 return NULL_RTX;
5414 /* We support any size mode. */
5415 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5416 GET_MODE_BITSIZE (innermode));
5418 /* Unpack the value. */
5420 if (GET_CODE (op) == CONST_VECTOR)
5422 num_elem = CONST_VECTOR_NUNITS (op);
5423 elems = &CONST_VECTOR_ELT (op, 0);
5424 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5426 else
5428 num_elem = 1;
5429 elems = &op;
5430 elem_bitsize = max_bitsize;
5432 /* If this asserts, it is too complicated; reducing value_bit may help. */
5433 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5434 /* I don't know how to handle endianness of sub-units. */
5435 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5437 for (elem = 0; elem < num_elem; elem++)
5439 unsigned char * vp;
5440 rtx el = elems[elem];
5442 /* Vectors are kept in target memory order. (This is probably
5443 a mistake.) */
5445 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5446 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5447 / BITS_PER_UNIT);
5448 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5449 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5450 unsigned bytele = (subword_byte % UNITS_PER_WORD
5451 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5452 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5455 switch (GET_CODE (el))
5457 case CONST_INT:
5458 for (i = 0;
5459 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5460 i += value_bit)
5461 *vp++ = INTVAL (el) >> i;
5462 /* CONST_INTs are always logically sign-extended. */
5463 for (; i < elem_bitsize; i += value_bit)
5464 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5465 break;
5467 case CONST_WIDE_INT:
5469 rtx_mode_t val = std::make_pair (el, innermode);
5470 unsigned char extend = wi::sign_mask (val);
5472 for (i = 0; i < elem_bitsize; i += value_bit)
5473 *vp++ = wi::extract_uhwi (val, i, value_bit);
5474 for (; i < elem_bitsize; i += value_bit)
5475 *vp++ = extend;
5477 break;
5479 case CONST_DOUBLE:
5480 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5482 unsigned char extend = 0;
5483 /* If this triggers, someone should have generated a
5484 CONST_INT instead. */
5485 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5487 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5488 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5489 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5491 *vp++
5492 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5493 i += value_bit;
5496 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5497 extend = -1;
5498 for (; i < elem_bitsize; i += value_bit)
5499 *vp++ = extend;
5501 else
5503 /* This is big enough for anything on the platform. */
5504 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5505 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5507 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5508 gcc_assert (bitsize <= elem_bitsize);
5509 gcc_assert (bitsize % value_bit == 0);
5511 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5512 GET_MODE (el));
5514 /* real_to_target produces its result in words affected by
5515 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5516 and use WORDS_BIG_ENDIAN instead; see the documentation
5517 of SUBREG in rtl.texi. */
5518 for (i = 0; i < bitsize; i += value_bit)
5520 int ibase;
5521 if (WORDS_BIG_ENDIAN)
5522 ibase = bitsize - 1 - i;
5523 else
5524 ibase = i;
5525 *vp++ = tmp[ibase / 32] >> i % 32;
5528 /* It shouldn't matter what's done here, so fill it with
5529 zero. */
5530 for (; i < elem_bitsize; i += value_bit)
5531 *vp++ = 0;
5533 break;
5535 case CONST_FIXED:
5536 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5538 for (i = 0; i < elem_bitsize; i += value_bit)
5539 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5541 else
5543 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5544 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5545 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5546 i += value_bit)
5547 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5548 >> (i - HOST_BITS_PER_WIDE_INT);
5549 for (; i < elem_bitsize; i += value_bit)
5550 *vp++ = 0;
5552 break;
5554 default:
5555 gcc_unreachable ();
5559 /* Now, pick the right byte to start with. */
5560 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5561 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5562 will already have offset 0. */
5563 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5565 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5566 - byte);
5567 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5568 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5569 byte = (subword_byte % UNITS_PER_WORD
5570 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5573 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5574 so if it's become negative it will instead be very large.) */
5575 gcc_assert (byte < GET_MODE_SIZE (innermode));
5577 /* Convert from bytes to chunks of size value_bit. */
5578 value_start = byte * (BITS_PER_UNIT / value_bit);
5580 /* Re-pack the value. */
5582 if (VECTOR_MODE_P (outermode))
5584 num_elem = GET_MODE_NUNITS (outermode);
5585 result_v = rtvec_alloc (num_elem);
5586 elems = &RTVEC_ELT (result_v, 0);
5587 outer_submode = GET_MODE_INNER (outermode);
5589 else
5591 num_elem = 1;
5592 elems = &result_s;
5593 outer_submode = outermode;
5596 outer_class = GET_MODE_CLASS (outer_submode);
5597 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5599 gcc_assert (elem_bitsize % value_bit == 0);
5600 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5602 for (elem = 0; elem < num_elem; elem++)
5604 unsigned char *vp;
5606 /* Vectors are stored in target memory order. (This is probably
5607 a mistake.) */
5609 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5610 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5611 / BITS_PER_UNIT);
5612 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5613 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5614 unsigned bytele = (subword_byte % UNITS_PER_WORD
5615 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5616 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5619 switch (outer_class)
5621 case MODE_INT:
5622 case MODE_PARTIAL_INT:
5624 int u;
5625 int base = 0;
5626 int units
5627 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5628 / HOST_BITS_PER_WIDE_INT;
5629 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5630 wide_int r;
5632 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5633 return NULL_RTX;
5634 for (u = 0; u < units; u++)
5636 unsigned HOST_WIDE_INT buf = 0;
5637 for (i = 0;
5638 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5639 i += value_bit)
5640 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5642 tmp[u] = buf;
5643 base += HOST_BITS_PER_WIDE_INT;
5645 r = wide_int::from_array (tmp, units,
5646 GET_MODE_PRECISION (outer_submode));
5647 #if TARGET_SUPPORTS_WIDE_INT == 0
5648 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5649 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5650 return NULL_RTX;
5651 #endif
5652 elems[elem] = immed_wide_int_const (r, outer_submode);
5654 break;
5656 case MODE_FLOAT:
5657 case MODE_DECIMAL_FLOAT:
5659 REAL_VALUE_TYPE r;
5660 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5662 /* real_from_target wants its input in words affected by
5663 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5664 and use WORDS_BIG_ENDIAN instead; see the documentation
5665 of SUBREG in rtl.texi. */
5666 for (i = 0; i < max_bitsize / 32; i++)
5667 tmp[i] = 0;
5668 for (i = 0; i < elem_bitsize; i += value_bit)
5670 int ibase;
5671 if (WORDS_BIG_ENDIAN)
5672 ibase = elem_bitsize - 1 - i;
5673 else
5674 ibase = i;
5675 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5678 real_from_target (&r, tmp, outer_submode);
5679 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5681 break;
5683 case MODE_FRACT:
5684 case MODE_UFRACT:
5685 case MODE_ACCUM:
5686 case MODE_UACCUM:
5688 FIXED_VALUE_TYPE f;
5689 f.data.low = 0;
5690 f.data.high = 0;
5691 f.mode = outer_submode;
5693 for (i = 0;
5694 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5695 i += value_bit)
5696 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5697 for (; i < elem_bitsize; i += value_bit)
5698 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5699 << (i - HOST_BITS_PER_WIDE_INT));
5701 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5703 break;
5705 default:
5706 gcc_unreachable ();
5709 if (VECTOR_MODE_P (outermode))
5710 return gen_rtx_CONST_VECTOR (outermode, result_v);
5711 else
5712 return result_s;
5715 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5716 Return 0 if no simplifications are possible. */
5717 rtx
5718 simplify_subreg (machine_mode outermode, rtx op,
5719 machine_mode innermode, unsigned int byte)
5721 /* Little bit of sanity checking. */
5722 gcc_assert (innermode != VOIDmode);
5723 gcc_assert (outermode != VOIDmode);
5724 gcc_assert (innermode != BLKmode);
5725 gcc_assert (outermode != BLKmode);
5727 gcc_assert (GET_MODE (op) == innermode
5728 || GET_MODE (op) == VOIDmode);
5730 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5731 return NULL_RTX;
5733 if (byte >= GET_MODE_SIZE (innermode))
5734 return NULL_RTX;
5736 if (outermode == innermode && !byte)
5737 return op;
5739 if (CONST_SCALAR_INT_P (op)
5740 || CONST_DOUBLE_AS_FLOAT_P (op)
5741 || GET_CODE (op) == CONST_FIXED
5742 || GET_CODE (op) == CONST_VECTOR)
5743 return simplify_immed_subreg (outermode, op, innermode, byte);
5745 /* Changing mode twice with SUBREG => just change it once,
5746 or not at all if changing back to the starting mode. */
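/* Illustrative sketch (not in the original source): for a pseudo
   register R on a typical little-endian target,
     (subreg:QI (subreg:HI (reg:SI R) 0) 0)
   collapses to (subreg:QI (reg:SI R) 0), and
     (subreg:SI (subreg:HI (reg:SI R) 0) 0)
   collapses all the way back to (reg:SI R), because the outer mode
   matches the innermost mode.  */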
5747 if (GET_CODE (op) == SUBREG)
5749 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5750 int final_offset = byte + SUBREG_BYTE (op);
5751 rtx newx;
5753 if (outermode == innermostmode
5754 && byte == 0 && SUBREG_BYTE (op) == 0)
5755 return SUBREG_REG (op);
5757 /* The SUBREG_BYTE represents the offset, as if the value were stored
5758 in memory. The irritating exception is the paradoxical subreg,
5759 where we define SUBREG_BYTE to be 0; on big-endian machines this
5760 value would otherwise be negative. For a moment, undo this exception. */
5761 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5763 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5764 if (WORDS_BIG_ENDIAN)
5765 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5766 if (BYTES_BIG_ENDIAN)
5767 final_offset += difference % UNITS_PER_WORD;
5769 if (SUBREG_BYTE (op) == 0
5770 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5772 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5773 if (WORDS_BIG_ENDIAN)
5774 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5775 if (BYTES_BIG_ENDIAN)
5776 final_offset += difference % UNITS_PER_WORD;
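/* Worked example (illustrative, not in the original source): for
   (subreg:DI (subreg:SI ...) 0) on a big-endian target with
   UNITS_PER_WORD == 4, the requested subreg is paradoxical, so the
   first adjustment above computes
     difference    = 4 - 8        = -4
     final_offset += (-4 / 4) * 4 = -4   (WORDS_BIG_ENDIAN)
     final_offset += -4 % 4       =  0   (BYTES_BIG_ENDIAN)
   i.e. the "real" offset of the paradoxical lowpart is -4, which is
   what the offset checks below then have to cope with.  */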
5779 /* See whether resulting subreg will be paradoxical. */
5780 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5782 /* In nonparadoxical subregs we can't handle negative offsets. */
5783 if (final_offset < 0)
5784 return NULL_RTX;
5785 /* Bail out in case resulting subreg would be incorrect. */
5786 if (final_offset % GET_MODE_SIZE (outermode)
5787 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5788 return NULL_RTX;
5790 else
5792 int offset = 0;
5793 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5795 /* In a paradoxical subreg, see if we are still looking at the lower part.
5796 If so, our SUBREG_BYTE will be 0. */
5797 if (WORDS_BIG_ENDIAN)
5798 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5799 if (BYTES_BIG_ENDIAN)
5800 offset += difference % UNITS_PER_WORD;
5801 if (offset == final_offset)
5802 final_offset = 0;
5803 else
5804 return NULL_RTX;
5807 /* Recurse for further possible simplifications. */
5808 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5809 final_offset);
5810 if (newx)
5811 return newx;
5812 if (validate_subreg (outermode, innermostmode,
5813 SUBREG_REG (op), final_offset))
5815 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5816 if (SUBREG_PROMOTED_VAR_P (op)
5817 && SUBREG_PROMOTED_SIGN (op) >= 0
5818 && GET_MODE_CLASS (outermode) == MODE_INT
5819 && IN_RANGE (GET_MODE_SIZE (outermode),
5820 GET_MODE_SIZE (innermode),
5821 GET_MODE_SIZE (innermostmode))
5822 && subreg_lowpart_p (newx))
5824 SUBREG_PROMOTED_VAR_P (newx) = 1;
5825 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5827 return newx;
5829 return NULL_RTX;
5832 /* SUBREG of a hard register => just change the register number
5833 and/or mode. If the hard register is not valid in that mode,
5834 suppress this simplification. If the hard register is the stack,
5835 frame, or argument pointer, leave this as a SUBREG. */
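/* Illustrative sketch (not in the original source): on a little-endian
   target where hard register 0 is valid in both DImode and SImode,
   (subreg:SI (reg:DI 0) 0) would typically simplify to (reg:SI 0);
   when simplify_subreg_regno cannot produce a valid hard register,
   the SUBREG is left alone.  */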
5837 if (REG_P (op) && HARD_REGISTER_P (op))
5839 unsigned int regno, final_regno;
5841 regno = REGNO (op);
5842 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5843 if (HARD_REGISTER_NUM_P (final_regno))
5845 rtx x;
5846 int final_offset = byte;
5848 /* Adjust offset for paradoxical subregs. */
5849 if (byte == 0
5850 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5852 int difference = (GET_MODE_SIZE (innermode)
5853 - GET_MODE_SIZE (outermode));
5854 if (WORDS_BIG_ENDIAN)
5855 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5856 if (BYTES_BIG_ENDIAN)
5857 final_offset += difference % UNITS_PER_WORD;
5860 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5862 /* Propagate the original regno. We don't have any way to specify
5863 an offset inside the original regno, so do so only for the lowpart.
5864 The information is used only by alias analysis, which cannot
5865 grok partial registers anyway. */
5867 if (subreg_lowpart_offset (outermode, innermode) == byte)
5868 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5869 return x;
5873 /* If we have a SUBREG of a register that we are replacing and we are
5874 replacing it with a MEM, make a new MEM and try replacing the
5875 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5876 or if we would be widening it. */
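/* Illustrative sketch (not in the original source): for a non-volatile
   (mem:SI ADDR) whose address is not mode-dependent,
   (subreg:QI (mem:SI ADDR) 3) becomes roughly (mem:QI (plus ADDR 3))
   via adjust_address_nv, while a widening subreg of the MEM is left
   alone.  */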
5878 if (MEM_P (op)
5879 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5880 /* Allow splitting of volatile memory references in case we don't
5881 have an instruction to move the whole thing. */
5882 && (! MEM_VOLATILE_P (op)
5883 || ! have_insn_for (SET, innermode))
5884 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5885 return adjust_address_nv (op, outermode, byte);
5887 /* Handle complex values represented as CONCAT
5888 of real and imaginary part. */
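/* Illustrative sketch (not in the original source): with 4-byte SFmode
   parts, (subreg:SF (concat:SC R I) 4) selects the second part and
   recurses with a zero offset, so it simplifies to I; an offset that
   straddles the two parts yields NULL_RTX.  */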
5889 if (GET_CODE (op) == CONCAT)
5891 unsigned int part_size, final_offset;
5892 rtx part, res;
5894 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5895 if (byte < part_size)
5897 part = XEXP (op, 0);
5898 final_offset = byte;
5900 else
5902 part = XEXP (op, 1);
5903 final_offset = byte - part_size;
5906 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5907 return NULL_RTX;
5909 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5910 if (res)
5911 return res;
5912 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5913 return gen_rtx_SUBREG (outermode, part, final_offset);
5914 return NULL_RTX;
5917 /* A SUBREG resulting from a zero extension may fold to zero if
5918 it extracts higher bits than the ZERO_EXTEND's source bits. */
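/* Illustrative sketch (not in the original source): on a little-endian
   target, (subreg:SI (zero_extend:DI (reg:HI X)) 4) reads bits 32-63
   of the zero-extension, all of which lie above the 16 source bits,
   so it folds to (const_int 0).  */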
5919 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5921 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5922 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5923 return CONST0_RTX (outermode);
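/* The lowpart case below behaves like a truncation.  Illustrative
   sketch (not in the original source): taking the SImode lowpart of
   (sign_extend:DI (reg:SI X)) goes through simplify_truncation and
   comes back as (reg:SI X).  */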
5926 if (SCALAR_INT_MODE_P (outermode)
5927 && SCALAR_INT_MODE_P (innermode)
5928 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5929 && byte == subreg_lowpart_offset (outermode, innermode))
5931 rtx tem = simplify_truncation (outermode, op, innermode);
5932 if (tem)
5933 return tem;
5936 return NULL_RTX;
5939 /* Make a SUBREG operation or equivalent if it folds. */
5941 rtx
5942 simplify_gen_subreg (machine_mode outermode, rtx op,
5943 machine_mode innermode, unsigned int byte)
5945 rtx newx;
5947 newx = simplify_subreg (outermode, op, innermode, byte);
5948 if (newx)
5949 return newx;
5951 if (GET_CODE (op) == SUBREG
5952 || GET_CODE (op) == CONCAT
5953 || GET_MODE (op) == VOIDmode)
5954 return NULL_RTX;
5956 if (validate_subreg (outermode, innermode, op, byte))
5957 return gen_rtx_SUBREG (outermode, op, byte);
5959 return NULL_RTX;
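/* Illustrative usage (not in the original source): a caller that wants
   the low SImode half of a DImode pseudo R can write

     rtx lo = simplify_gen_subreg (SImode, R, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   and gets back either a simplified rtx, a fresh (subreg:SI R ...), or
   NULL_RTX when no valid subreg can be formed.  */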
5962 /* Simplify X, an rtx expression.
5964 Return the simplified expression or NULL if no simplifications
5965 were possible.
5967 This is the preferred entry point into the simplification routines;
5968 however, we still allow passes to call the more specific routines.
5970 Right now GCC has three (yes, three) major bodies of RTL simplification
5971 code that need to be unified.
5973 1. fold_rtx in cse.c. This code uses various CSE-specific
5974 information to aid in RTL simplification.
5976 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5977 it uses combine-specific information to aid in RTL
5978 simplification.
5980 3. The routines in this file.
5983 Long term we want to only have one body of simplification code; to
5984 get to that state I recommend the following steps:
5986 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5987 which do not depend on pass-specific state into these routines.
5989 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5990 use this routine whenever possible.
5992 3. Allow for pass dependent state to be provided to these
5993 routines and add simplifications based on the pass dependent
5994 state. Remove code from cse.c & combine.c that becomes
5995 redundant/dead.
5997 It will take time, but ultimately the compiler will be easier to
5998 maintain and improve. It's totally silly that when we add a
5999 simplification it needs to be added to 4 places (3 for RTL
6000 simplification and 1 for tree simplification). */
6002 rtx
6003 simplify_rtx (const_rtx x)
6005 const enum rtx_code code = GET_CODE (x);
6006 const machine_mode mode = GET_MODE (x);
6008 switch (GET_RTX_CLASS (code))
6010 case RTX_UNARY:
6011 return simplify_unary_operation (code, mode,
6012 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6013 case RTX_COMM_ARITH:
6014 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6015 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6017 /* Fall through.... */
6019 case RTX_BIN_ARITH:
6020 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6022 case RTX_TERNARY:
6023 case RTX_BITFIELD_OPS:
6024 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6025 XEXP (x, 0), XEXP (x, 1),
6026 XEXP (x, 2));
6028 case RTX_COMPARE:
6029 case RTX_COMM_COMPARE:
6030 return simplify_relational_operation (code, mode,
6031 ((GET_MODE (XEXP (x, 0))
6032 != VOIDmode)
6033 ? GET_MODE (XEXP (x, 0))
6034 : GET_MODE (XEXP (x, 1))),
6035 XEXP (x, 0),
6036 XEXP (x, 1));
6038 case RTX_EXTRA:
6039 if (code == SUBREG)
6040 return simplify_subreg (mode, SUBREG_REG (x),
6041 GET_MODE (SUBREG_REG (x)),
6042 SUBREG_BYTE (x));
6043 break;
6045 case RTX_OBJ:
6046 if (code == LO_SUM)
6048 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6049 if (GET_CODE (XEXP (x, 0)) == HIGH
6050 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6051 return XEXP (x, 1);
6053 break;
6055 default:
6056 break;
6058 return NULL;
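/* Illustrative usage (not in the original source): passing
   (plus:SI (reg:SI X) (const_int 0)) to simplify_rtx dispatches to
   simplify_binary_operation, which folds away the zero and returns
   (reg:SI X); an rtx with no applicable simplification comes back as
   NULL.  */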