PR target/65871
[official-gcc.git] / gcc / simplify-rtx.c
blob 665421b79a2583f47d5f59c0a3bb7dc1a2f4f8c6
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "fold-const.h"
37 #include "varasm.h"
38 #include "tm_p.h"
39 #include "regs.h"
40 #include "hard-reg-set.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "recog.h"
44 #include "function.h"
45 #include "insn-codes.h"
46 #include "optabs.h"
47 #include "hashtab.h"
48 #include "statistics.h"
49 #include "real.h"
50 #include "fixed-value.h"
51 #include "expmed.h"
52 #include "dojump.h"
53 #include "explow.h"
54 #include "calls.h"
55 #include "emit-rtl.h"
56 #include "stmt.h"
57 #include "expr.h"
58 #include "diagnostic-core.h"
59 #include "ggc.h"
60 #include "target.h"
61 #include "predict.h"
63 /* Simplification and canonicalization of RTL. */
65 /* Much code operates on (low, high) pairs; the low value is an
66 unsigned wide int, the high value a signed wide int. We
67 occasionally need to sign extend from low to high as if low were a
68 signed wide int. */
69 #define HWI_SIGN_EXTEND(low) \
70 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
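/* A minimal illustration of the macro above: if LOW has its most
   significant bit set, (HOST_WIDE_INT) LOW is negative and the macro
   yields (HOST_WIDE_INT) -1 (an all-ones high word); otherwise it
   yields 0 -- the high half a signed widening of LOW would produce.  */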
72 static rtx neg_const_int (machine_mode, const_rtx);
73 static bool plus_minus_operand_p (const_rtx);
74 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
75 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
76 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
77 unsigned int);
78 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
79 rtx, rtx);
80 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
81 machine_mode, rtx, rtx);
82 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
83 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
84 rtx, rtx, rtx, rtx);
86 /* Negate a CONST_INT rtx, truncating (because a conversion from a
87 maximally negative number can overflow). */
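/* For example, in QImode the maximally negative value -128 negates to
   +128, which does not fit; gen_int_mode truncates the result back into
   QImode, so it wraps to -128 again instead of overflowing.  */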
88 static rtx
89 neg_const_int (machine_mode mode, const_rtx i)
91 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
94 /* Test whether the expression X is an immediate constant that represents
95 the most significant bit of machine mode MODE. */
97 bool
98 mode_signbit_p (machine_mode mode, const_rtx x)
100 unsigned HOST_WIDE_INT val;
101 unsigned int width;
103 if (GET_MODE_CLASS (mode) != MODE_INT)
104 return false;
106 width = GET_MODE_PRECISION (mode);
107 if (width == 0)
108 return false;
110 if (width <= HOST_BITS_PER_WIDE_INT
111 && CONST_INT_P (x))
112 val = INTVAL (x);
113 #if TARGET_SUPPORTS_WIDE_INT
114 else if (CONST_WIDE_INT_P (x))
116 unsigned int i;
117 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
118 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
119 return false;
120 for (i = 0; i < elts - 1; i++)
121 if (CONST_WIDE_INT_ELT (x, i) != 0)
122 return false;
123 val = CONST_WIDE_INT_ELT (x, elts - 1);
124 width %= HOST_BITS_PER_WIDE_INT;
125 if (width == 0)
126 width = HOST_BITS_PER_WIDE_INT;
128 #else
129 else if (width <= HOST_BITS_PER_DOUBLE_INT
130 && CONST_DOUBLE_AS_INT_P (x)
131 && CONST_DOUBLE_LOW (x) == 0)
133 val = CONST_DOUBLE_HIGH (x);
134 width -= HOST_BITS_PER_WIDE_INT;
136 #endif
137 else
138 /* X is not an integer constant. */
139 return false;
141 if (width < HOST_BITS_PER_WIDE_INT)
142 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
143 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
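/* As an illustration of the test above: with a 32-bit SImode, only the
   constant 0x80000000 (the sign bit alone) satisfies mode_signbit_p;
   any other set bit makes the final equality fail.  */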
146 /* Test whether VAL is equal to the most significant bit of mode MODE
147 (after masking with the mode mask of MODE). Returns false if the
148 precision of MODE is too large to handle. */
150 bool
151 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
153 unsigned int width;
155 if (GET_MODE_CLASS (mode) != MODE_INT)
156 return false;
158 width = GET_MODE_PRECISION (mode);
159 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
160 return false;
162 val &= GET_MODE_MASK (mode);
163 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
166 /* Test whether the most significant bit of mode MODE is set in VAL.
167 Returns false if the precision of MODE is too large to handle. */
168 bool
169 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
171 unsigned int width;
173 if (GET_MODE_CLASS (mode) != MODE_INT)
174 return false;
176 width = GET_MODE_PRECISION (mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
181 return val != 0;
184 /* Test whether the most significant bit of mode MODE is clear in VAL.
185 Returns false if the precision of MODE is too large to handle. */
186 bool
187 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
189 unsigned int width;
191 if (GET_MODE_CLASS (mode) != MODE_INT)
192 return false;
194 width = GET_MODE_PRECISION (mode);
195 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
196 return false;
198 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
199 return val == 0;
202 /* Make a binary operation by properly ordering the operands and
203 seeing if the expression folds. */
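/* For instance, with a commutative code such as PLUS, a call whose
   operands are (const_int 4) and an arbitrary pseudo such as (reg:SI 60)
   (an illustrative register number) that does not fold outright is
   reordered so the register comes first, yielding
   (plus:SI (reg:SI 60) (const_int 4)) -- constants go second.  */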
206 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
207 rtx op1)
209 rtx tem;
211 /* If this simplifies, do it. */
212 tem = simplify_binary_operation (code, mode, op0, op1);
213 if (tem)
214 return tem;
216 /* Put complex operands first and constants second if commutative. */
217 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
218 && swap_commutative_operands_p (op0, op1))
219 std::swap (op0, op1);
221 return gen_rtx_fmt_ee (code, mode, op0, op1);
224 /* If X is a MEM referencing the constant pool, return the real value.
225 Otherwise return X. */
227 avoid_constant_pool_reference (rtx x)
229 rtx c, tmp, addr;
230 machine_mode cmode;
231 HOST_WIDE_INT offset = 0;
233 switch (GET_CODE (x))
235 case MEM:
236 break;
238 case FLOAT_EXTEND:
239 /* Handle float extensions of constant pool references. */
240 tmp = XEXP (x, 0);
241 c = avoid_constant_pool_reference (tmp);
242 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
244 REAL_VALUE_TYPE d;
246 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
247 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
249 return x;
251 default:
252 return x;
255 if (GET_MODE (x) == BLKmode)
256 return x;
258 addr = XEXP (x, 0);
260 /* Call target hook to avoid the effects of -fpic etc.... */
261 addr = targetm.delegitimize_address (addr);
263 /* Split the address into a base and integer offset. */
264 if (GET_CODE (addr) == CONST
265 && GET_CODE (XEXP (addr, 0)) == PLUS
266 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
268 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
269 addr = XEXP (XEXP (addr, 0), 0);
272 if (GET_CODE (addr) == LO_SUM)
273 addr = XEXP (addr, 1);
275 /* If this is a constant pool reference, we can turn it into its
276 constant and hope that simplifications happen. */
277 if (GET_CODE (addr) == SYMBOL_REF
278 && CONSTANT_POOL_ADDRESS_P (addr))
280 c = get_pool_constant (addr);
281 cmode = get_pool_mode (addr);
283 /* If we're accessing the constant in a different mode than it was
284 originally stored, attempt to fix that up via subreg simplifications.
285 If that fails we have no choice but to return the original memory. */
286 if ((offset != 0 || cmode != GET_MODE (x))
287 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
289 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
290 if (tem && CONSTANT_P (tem))
291 return tem;
293 else
294 return c;
297 return x;
300 /* Simplify a MEM based on its attributes. This is the default
301 delegitimize_address target hook, and it's recommended that every
302 overrider call it. */
305 delegitimize_mem_from_attrs (rtx x)
307 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
308 use their base addresses as equivalent. */
309 if (MEM_P (x)
310 && MEM_EXPR (x)
311 && MEM_OFFSET_KNOWN_P (x))
313 tree decl = MEM_EXPR (x);
314 machine_mode mode = GET_MODE (x);
315 HOST_WIDE_INT offset = 0;
317 switch (TREE_CODE (decl))
319 default:
320 decl = NULL;
321 break;
323 case VAR_DECL:
324 break;
326 case ARRAY_REF:
327 case ARRAY_RANGE_REF:
328 case COMPONENT_REF:
329 case BIT_FIELD_REF:
330 case REALPART_EXPR:
331 case IMAGPART_EXPR:
332 case VIEW_CONVERT_EXPR:
334 HOST_WIDE_INT bitsize, bitpos;
335 tree toffset;
336 int unsignedp, volatilep = 0;
338 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
339 &mode, &unsignedp, &volatilep, false);
340 if (bitsize != GET_MODE_BITSIZE (mode)
341 || (bitpos % BITS_PER_UNIT)
342 || (toffset && !tree_fits_shwi_p (toffset)))
343 decl = NULL;
344 else
346 offset += bitpos / BITS_PER_UNIT;
347 if (toffset)
348 offset += tree_to_shwi (toffset);
350 break;
354 if (decl
355 && mode == GET_MODE (x)
356 && TREE_CODE (decl) == VAR_DECL
357 && (TREE_STATIC (decl)
358 || DECL_THREAD_LOCAL_P (decl))
359 && DECL_RTL_SET_P (decl)
360 && MEM_P (DECL_RTL (decl)))
362 rtx newx;
364 offset += MEM_OFFSET (x);
366 newx = DECL_RTL (decl);
368 if (MEM_P (newx))
370 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
372 /* Avoid creating a new MEM needlessly if we already had
373 the same address. We do if there's no OFFSET and the
374 old address X is identical to NEWX, or if X is of the
375 form (plus NEWX OFFSET), or the NEWX is of the form
376 (plus Y (const_int Z)) and X is that with the offset
377 added: (plus Y (const_int Z+OFFSET)). */
378 if (!((offset == 0
379 || (GET_CODE (o) == PLUS
380 && GET_CODE (XEXP (o, 1)) == CONST_INT
381 && (offset == INTVAL (XEXP (o, 1))
382 || (GET_CODE (n) == PLUS
383 && GET_CODE (XEXP (n, 1)) == CONST_INT
384 && (INTVAL (XEXP (n, 1)) + offset
385 == INTVAL (XEXP (o, 1)))
386 && (n = XEXP (n, 0))))
387 && (o = XEXP (o, 0))))
388 && rtx_equal_p (o, n)))
389 x = adjust_address_nv (newx, mode, offset);
391 else if (GET_MODE (x) == GET_MODE (newx)
392 && offset == 0)
393 x = newx;
397 return x;
400 /* Make a unary operation by first seeing if it folds and otherwise making
401 the specified operation. */
404 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
405 machine_mode op_mode)
407 rtx tem;
409 /* If this simplifies, use it. */
410 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
411 return tem;
413 return gen_rtx_fmt_e (code, mode, op);
416 /* Likewise for ternary operations. */
419 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
420 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
422 rtx tem;
424 /* If this simplifies, use it. */
425 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
426 op0, op1, op2)))
427 return tem;
429 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
432 /* Likewise, for relational operations.
433 CMP_MODE specifies mode comparison is done in. */
436 simplify_gen_relational (enum rtx_code code, machine_mode mode,
437 machine_mode cmp_mode, rtx op0, rtx op1)
439 rtx tem;
441 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
442 op0, op1)))
443 return tem;
445 return gen_rtx_fmt_ee (code, mode, op0, op1);
448 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
449 and simplify the result. If FN is non-NULL, call this callback on each
450 X, if it returns non-NULL, replace X with its return value and simplify the
451 result. */
454 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
455 rtx (*fn) (rtx, const_rtx, void *), void *data)
457 enum rtx_code code = GET_CODE (x);
458 machine_mode mode = GET_MODE (x);
459 machine_mode op_mode;
460 const char *fmt;
461 rtx op0, op1, op2, newx, op;
462 rtvec vec, newvec;
463 int i, j;
465 if (__builtin_expect (fn != NULL, 0))
467 newx = fn (x, old_rtx, data);
468 if (newx)
469 return newx;
471 else if (rtx_equal_p (x, old_rtx))
472 return copy_rtx ((rtx) data);
474 switch (GET_RTX_CLASS (code))
476 case RTX_UNARY:
477 op0 = XEXP (x, 0);
478 op_mode = GET_MODE (op0);
479 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
480 if (op0 == XEXP (x, 0))
481 return x;
482 return simplify_gen_unary (code, mode, op0, op_mode);
484 case RTX_BIN_ARITH:
485 case RTX_COMM_ARITH:
486 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
487 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
489 return x;
490 return simplify_gen_binary (code, mode, op0, op1);
492 case RTX_COMPARE:
493 case RTX_COMM_COMPARE:
494 op0 = XEXP (x, 0);
495 op1 = XEXP (x, 1);
496 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
497 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
498 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
499 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
500 return x;
501 return simplify_gen_relational (code, mode, op_mode, op0, op1);
503 case RTX_TERNARY:
504 case RTX_BITFIELD_OPS:
505 op0 = XEXP (x, 0);
506 op_mode = GET_MODE (op0);
507 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
509 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
510 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
511 return x;
512 if (op_mode == VOIDmode)
513 op_mode = GET_MODE (op0);
514 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
516 case RTX_EXTRA:
517 if (code == SUBREG)
519 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
520 if (op0 == SUBREG_REG (x))
521 return x;
522 op0 = simplify_gen_subreg (GET_MODE (x), op0,
523 GET_MODE (SUBREG_REG (x)),
524 SUBREG_BYTE (x));
525 return op0 ? op0 : x;
527 break;
529 case RTX_OBJ:
530 if (code == MEM)
532 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
533 if (op0 == XEXP (x, 0))
534 return x;
535 return replace_equiv_address_nv (x, op0);
537 else if (code == LO_SUM)
539 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
540 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
542 /* (lo_sum (high x) y) -> y where x and y have the same base. */
543 if (GET_CODE (op0) == HIGH)
545 rtx base0, base1, offset0, offset1;
546 split_const (XEXP (op0, 0), &base0, &offset0);
547 split_const (op1, &base1, &offset1);
548 if (rtx_equal_p (base0, base1))
549 return op1;
552 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
553 return x;
554 return gen_rtx_LO_SUM (mode, op0, op1);
556 break;
558 default:
559 break;
562 newx = x;
563 fmt = GET_RTX_FORMAT (code);
564 for (i = 0; fmt[i]; i++)
565 switch (fmt[i])
567 case 'E':
568 vec = XVEC (x, i);
569 newvec = XVEC (newx, i);
570 for (j = 0; j < GET_NUM_ELEM (vec); j++)
572 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
573 old_rtx, fn, data);
574 if (op != RTVEC_ELT (vec, j))
576 if (newvec == vec)
578 newvec = shallow_copy_rtvec (vec);
579 if (x == newx)
580 newx = shallow_copy_rtx (x);
581 XVEC (newx, i) = newvec;
583 RTVEC_ELT (newvec, j) = op;
586 break;
588 case 'e':
589 if (XEXP (x, i))
591 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
592 if (op != XEXP (x, i))
594 if (x == newx)
595 newx = shallow_copy_rtx (x);
596 XEXP (newx, i) = op;
599 break;
601 return newx;
604 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
605 resulting RTX. Return a new RTX which is as simplified as possible. */
608 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
610 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
613 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
614 Only handle cases where the truncated value is inherently an rvalue.
616 RTL provides two ways of truncating a value:
618 1. a lowpart subreg. This form is only a truncation when both
619 the outer and inner modes (here MODE and OP_MODE respectively)
620 are scalar integers, and only then when the subreg is used as
621 an rvalue.
623 It is only valid to form such truncating subregs if the
624 truncation requires no action by the target. The onus for
625 proving this is on the creator of the subreg -- e.g. the
626 caller to simplify_subreg or simplify_gen_subreg -- and typically
627 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
629 2. a TRUNCATE. This form handles both scalar and compound integers.
631 The first form is preferred where valid. However, the TRUNCATE
632 handling in simplify_unary_operation turns the second form into the
633 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
634 so it is generally safe to form rvalue truncations using:
636 simplify_gen_unary (TRUNCATE, ...)
638 and leave simplify_unary_operation to work out which representation
639 should be used.
641 Because of the proof requirements on (1), simplify_truncation must
642 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
643 regardless of whether the outer truncation came from a SUBREG or a
644 TRUNCATE. For example, if the caller has proven that an SImode
645 truncation of:
647 (and:DI X Y)
649 is a no-op and can be represented as a subreg, it does not follow
650 that SImode truncations of X and Y are also no-ops. On a target
651 like 64-bit MIPS that requires SImode values to be stored in
652 sign-extended form, an SImode truncation of:
654 (and:DI (reg:DI X) (const_int 63))
656 is trivially a no-op because only the lower 6 bits can be set.
657 However, X is still an arbitrary 64-bit number and so we cannot
658 assume that truncating it too is a no-op. */
660 static rtx
661 simplify_truncation (machine_mode mode, rtx op,
662 machine_mode op_mode)
664 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
665 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
666 gcc_assert (precision <= op_precision);
668 /* Optimize truncations of zero and sign extended values. */
669 if (GET_CODE (op) == ZERO_EXTEND
670 || GET_CODE (op) == SIGN_EXTEND)
672 /* There are three possibilities. If MODE is the same as the
673 origmode, we can omit both the extension and the subreg.
674 If MODE is not larger than the origmode, we can apply the
675 truncation without the extension. Finally, if the outermode
676 is larger than the origmode, we can just extend to the appropriate
677 mode. */
678 machine_mode origmode = GET_MODE (XEXP (op, 0));
679 if (mode == origmode)
680 return XEXP (op, 0);
681 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
682 return simplify_gen_unary (TRUNCATE, mode,
683 XEXP (op, 0), origmode);
684 else
685 return simplify_gen_unary (GET_CODE (op), mode,
686 XEXP (op, 0), origmode);
689 /* If the machine can perform operations in the truncated mode, distribute
690 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
691 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
692 if (1
693 #ifdef WORD_REGISTER_OPERATIONS
694 && precision >= BITS_PER_WORD
695 #endif
696 && (GET_CODE (op) == PLUS
697 || GET_CODE (op) == MINUS
698 || GET_CODE (op) == MULT))
700 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
701 if (op0)
703 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
704 if (op1)
705 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
709 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
710 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
711 the outer subreg is effectively a truncation to the original mode. */
712 if ((GET_CODE (op) == LSHIFTRT
713 || GET_CODE (op) == ASHIFTRT)
714 /* Ensure that OP_MODE is at least twice as wide as MODE
715 to avoid the possibility that an outer LSHIFTRT shifts by more
716 than the sign extension's sign_bit_copies and introduces zeros
717 into the high bits of the result. */
718 && 2 * precision <= op_precision
719 && CONST_INT_P (XEXP (op, 1))
720 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722 && UINTVAL (XEXP (op, 1)) < precision)
723 return simplify_gen_binary (ASHIFTRT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
726 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
727 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
728 the outer subreg is effectively a truncation to the original mode. */
729 if ((GET_CODE (op) == LSHIFTRT
730 || GET_CODE (op) == ASHIFTRT)
731 && CONST_INT_P (XEXP (op, 1))
732 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
733 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
734 && UINTVAL (XEXP (op, 1)) < precision)
735 return simplify_gen_binary (LSHIFTRT, mode,
736 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
738 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
739 (ashift:QI (x:QI) C), where C is a suitable small constant and
740 the outer subreg is effectively a truncation to the original mode. */
741 if (GET_CODE (op) == ASHIFT
742 && CONST_INT_P (XEXP (op, 1))
743 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
744 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
745 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
746 && UINTVAL (XEXP (op, 1)) < precision)
747 return simplify_gen_binary (ASHIFT, mode,
748 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
750 /* Recognize a word extraction from a multi-word subreg. */
751 if ((GET_CODE (op) == LSHIFTRT
752 || GET_CODE (op) == ASHIFTRT)
753 && SCALAR_INT_MODE_P (mode)
754 && SCALAR_INT_MODE_P (op_mode)
755 && precision >= BITS_PER_WORD
756 && 2 * precision <= op_precision
757 && CONST_INT_P (XEXP (op, 1))
758 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
759 && UINTVAL (XEXP (op, 1)) < op_precision)
761 int byte = subreg_lowpart_offset (mode, op_mode);
762 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
763 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
764 (WORDS_BIG_ENDIAN
765 ? byte - shifted_bytes
766 : byte + shifted_bytes));
769 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
770 and try replacing the TRUNCATE and shift with it. Don't do this
771 if the MEM has a mode-dependent address. */
772 if ((GET_CODE (op) == LSHIFTRT
773 || GET_CODE (op) == ASHIFTRT)
774 && SCALAR_INT_MODE_P (op_mode)
775 && MEM_P (XEXP (op, 0))
776 && CONST_INT_P (XEXP (op, 1))
777 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
778 && INTVAL (XEXP (op, 1)) > 0
779 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
780 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
781 MEM_ADDR_SPACE (XEXP (op, 0)))
782 && ! MEM_VOLATILE_P (XEXP (op, 0))
783 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
784 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
786 int byte = subreg_lowpart_offset (mode, op_mode);
787 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
788 return adjust_address_nv (XEXP (op, 0), mode,
789 (WORDS_BIG_ENDIAN
790 ? byte - shifted_bytes
791 : byte + shifted_bytes));
794 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
795 (OP:SI foo:SI) if OP is NEG or ABS. */
796 if ((GET_CODE (op) == ABS
797 || GET_CODE (op) == NEG)
798 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
799 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
800 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
801 return simplify_gen_unary (GET_CODE (op), mode,
802 XEXP (XEXP (op, 0), 0), mode);
804 /* (truncate:A (subreg:B (truncate:C X) 0)) is
805 (truncate:A X). */
806 if (GET_CODE (op) == SUBREG
807 && SCALAR_INT_MODE_P (mode)
808 && SCALAR_INT_MODE_P (op_mode)
809 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
810 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
811 && subreg_lowpart_p (op))
813 rtx inner = XEXP (SUBREG_REG (op), 0);
814 if (GET_MODE_PRECISION (mode)
815 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
816 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
817 else
818 /* If subreg above is paradoxical and C is narrower
819 than A, return (subreg:A (truncate:C X) 0). */
820 return simplify_gen_subreg (mode, SUBREG_REG (op),
821 GET_MODE (SUBREG_REG (op)), 0);
824 /* (truncate:A (truncate:B X)) is (truncate:A X). */
825 if (GET_CODE (op) == TRUNCATE)
826 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
827 GET_MODE (XEXP (op, 0)));
829 return NULL_RTX;
832 /* Try to simplify a unary operation CODE whose output mode is to be
833 MODE with input operand OP whose mode was originally OP_MODE.
834 Return zero if no simplification can be made. */
836 simplify_unary_operation (enum rtx_code code, machine_mode mode,
837 rtx op, machine_mode op_mode)
839 rtx trueop, tem;
841 trueop = avoid_constant_pool_reference (op);
843 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
844 if (tem)
845 return tem;
847 return simplify_unary_operation_1 (code, mode, op);
850 /* Perform some simplifications we can do even if the operands
851 aren't constant. */
852 static rtx
853 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
855 enum rtx_code reversed;
856 rtx temp;
858 switch (code)
860 case NOT:
861 /* (not (not X)) == X. */
862 if (GET_CODE (op) == NOT)
863 return XEXP (op, 0);
865 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
866 comparison is all ones. */
867 if (COMPARISON_P (op)
868 && (mode == BImode || STORE_FLAG_VALUE == -1)
869 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
870 return simplify_gen_relational (reversed, mode, VOIDmode,
871 XEXP (op, 0), XEXP (op, 1));
873 /* (not (plus X -1)) can become (neg X). */
874 if (GET_CODE (op) == PLUS
875 && XEXP (op, 1) == constm1_rtx)
876 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
878 /* Similarly, (not (neg X)) is (plus X -1). */
879 if (GET_CODE (op) == NEG)
880 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
881 CONSTM1_RTX (mode));
883 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
884 if (GET_CODE (op) == XOR
885 && CONST_INT_P (XEXP (op, 1))
886 && (temp = simplify_unary_operation (NOT, mode,
887 XEXP (op, 1), mode)) != 0)
888 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
890 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
891 if (GET_CODE (op) == PLUS
892 && CONST_INT_P (XEXP (op, 1))
893 && mode_signbit_p (mode, XEXP (op, 1))
894 && (temp = simplify_unary_operation (NOT, mode,
895 XEXP (op, 1), mode)) != 0)
896 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
899 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
900 operands other than 1, but that is not valid. We could do a
901 similar simplification for (not (lshiftrt C X)) where C is
902 just the sign bit, but this doesn't seem common enough to
903 bother with. */
904 if (GET_CODE (op) == ASHIFT
905 && XEXP (op, 0) == const1_rtx)
907 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
908 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
911 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
912 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
913 so we can perform the above simplification. */
914 if (STORE_FLAG_VALUE == -1
915 && GET_CODE (op) == ASHIFTRT
916 && CONST_INT_P (XEXP (op, 1))
917 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
918 return simplify_gen_relational (GE, mode, VOIDmode,
919 XEXP (op, 0), const0_rtx);
922 if (GET_CODE (op) == SUBREG
923 && subreg_lowpart_p (op)
924 && (GET_MODE_SIZE (GET_MODE (op))
925 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
926 && GET_CODE (SUBREG_REG (op)) == ASHIFT
927 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
929 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
930 rtx x;
932 x = gen_rtx_ROTATE (inner_mode,
933 simplify_gen_unary (NOT, inner_mode, const1_rtx,
934 inner_mode),
935 XEXP (SUBREG_REG (op), 1));
936 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
937 if (temp)
938 return temp;
941 /* Apply De Morgan's laws to reduce number of patterns for machines
942 with negating logical insns (and-not, nand, etc.). If result has
943 only one NOT, put it first, since that is how the patterns are
944 coded. */
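/* Concretely: (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)); when only one operand
   ends up negated, the code below puts the remaining NOT first.  */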
945 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
947 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
948 machine_mode op_mode;
950 op_mode = GET_MODE (in1);
951 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
953 op_mode = GET_MODE (in2);
954 if (op_mode == VOIDmode)
955 op_mode = mode;
956 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
958 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
960 rtx tem = in2;
961 in2 = in1; in1 = tem;
964 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
965 mode, in1, in2);
968 /* (not (bswap x)) -> (bswap (not x)). */
969 if (GET_CODE (op) == BSWAP)
971 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
972 return simplify_gen_unary (BSWAP, mode, x, mode);
974 break;
976 case NEG:
977 /* (neg (neg X)) == X. */
978 if (GET_CODE (op) == NEG)
979 return XEXP (op, 0);
981 /* (neg (plus X 1)) can become (not X). */
982 if (GET_CODE (op) == PLUS
983 && XEXP (op, 1) == const1_rtx)
984 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
986 /* Similarly, (neg (not X)) is (plus X 1). */
987 if (GET_CODE (op) == NOT)
988 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
989 CONST1_RTX (mode));
991 /* (neg (minus X Y)) can become (minus Y X). This transformation
992 isn't safe for modes with signed zeros, since if X and Y are
993 both +0, (minus Y X) is the same as (minus X Y). If the
994 rounding mode is towards +infinity (or -infinity) then the two
995 expressions will be rounded differently. */
996 if (GET_CODE (op) == MINUS
997 && !HONOR_SIGNED_ZEROS (mode)
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
999 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1001 if (GET_CODE (op) == PLUS
1002 && !HONOR_SIGNED_ZEROS (mode)
1003 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1005 /* (neg (plus A C)) is simplified to (minus -C A). */
1006 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1007 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1009 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1010 if (temp)
1011 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1014 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1015 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1016 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1019 /* (neg (mult A B)) becomes (mult A (neg B)).
1020 This works even for floating-point values. */
1021 if (GET_CODE (op) == MULT
1022 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1024 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1025 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1028 /* NEG commutes with ASHIFT since it is multiplication. Only do
1029 this if we can then eliminate the NEG (e.g., if the operand
1030 is a constant). */
1031 if (GET_CODE (op) == ASHIFT)
1033 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1034 if (temp)
1035 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1038 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1039 C is equal to the width of MODE minus 1. */
1040 if (GET_CODE (op) == ASHIFTRT
1041 && CONST_INT_P (XEXP (op, 1))
1042 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1043 return simplify_gen_binary (LSHIFTRT, mode,
1044 XEXP (op, 0), XEXP (op, 1));
1046 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1047 C is equal to the width of MODE minus 1. */
1048 if (GET_CODE (op) == LSHIFTRT
1049 && CONST_INT_P (XEXP (op, 1))
1050 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1051 return simplify_gen_binary (ASHIFTRT, mode,
1052 XEXP (op, 0), XEXP (op, 1));
1054 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1055 if (GET_CODE (op) == XOR
1056 && XEXP (op, 1) == const1_rtx
1057 && nonzero_bits (XEXP (op, 0), mode) == 1)
1058 return plus_constant (mode, XEXP (op, 0), -1);
1060 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1061 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1062 if (GET_CODE (op) == LT
1063 && XEXP (op, 1) == const0_rtx
1064 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1066 machine_mode inner = GET_MODE (XEXP (op, 0));
1067 int isize = GET_MODE_PRECISION (inner);
1068 if (STORE_FLAG_VALUE == 1)
1070 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1071 GEN_INT (isize - 1));
1072 if (mode == inner)
1073 return temp;
1074 if (GET_MODE_PRECISION (mode) > isize)
1075 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1076 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1078 else if (STORE_FLAG_VALUE == -1)
1080 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1081 GEN_INT (isize - 1));
1082 if (mode == inner)
1083 return temp;
1084 if (GET_MODE_PRECISION (mode) > isize)
1085 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1086 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1089 break;
1091 case TRUNCATE:
1092 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1093 with the umulXi3_highpart patterns. */
1094 if (GET_CODE (op) == LSHIFTRT
1095 && GET_CODE (XEXP (op, 0)) == MULT)
1096 break;
1098 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1100 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1102 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1103 if (temp)
1104 return temp;
1106 /* We can't handle truncation to a partial integer mode here
1107 because we don't know the real bitsize of the partial
1108 integer mode. */
1109 break;
1112 if (GET_MODE (op) != VOIDmode)
1114 temp = simplify_truncation (mode, op, GET_MODE (op));
1115 if (temp)
1116 return temp;
1119 /* If we know that the value is already truncated, we can
1120 replace the TRUNCATE with a SUBREG. */
1121 if (GET_MODE_NUNITS (mode) == 1
1122 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1123 || truncated_to_mode (mode, op)))
1125 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1126 if (temp)
1127 return temp;
1130 /* A truncate of a comparison can be replaced with a subreg if
1131 STORE_FLAG_VALUE permits. This is like the previous test,
1132 but it works even if the comparison is done in a mode larger
1133 than HOST_BITS_PER_WIDE_INT. */
1134 if (HWI_COMPUTABLE_MODE_P (mode)
1135 && COMPARISON_P (op)
1136 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1138 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1139 if (temp)
1140 return temp;
1143 /* A truncate of a memory is just loading the low part of the memory
1144 if we are not changing the meaning of the address. */
1145 if (GET_CODE (op) == MEM
1146 && !VECTOR_MODE_P (mode)
1147 && !MEM_VOLATILE_P (op)
1148 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1150 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1151 if (temp)
1152 return temp;
1155 break;
1157 case FLOAT_TRUNCATE:
1158 if (DECIMAL_FLOAT_MODE_P (mode))
1159 break;
1161 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1162 if (GET_CODE (op) == FLOAT_EXTEND
1163 && GET_MODE (XEXP (op, 0)) == mode)
1164 return XEXP (op, 0);
1166 /* (float_truncate:SF (float_truncate:DF foo:XF))
1167 = (float_truncate:SF foo:XF).
1168 This may eliminate double rounding, so it is unsafe.
1170 (float_truncate:SF (float_extend:XF foo:DF))
1171 = (float_truncate:SF foo:DF).
1173 (float_truncate:DF (float_extend:XF foo:SF))
1174 = (float_extend:DF foo:SF). */
1175 if ((GET_CODE (op) == FLOAT_TRUNCATE
1176 && flag_unsafe_math_optimizations)
1177 || GET_CODE (op) == FLOAT_EXTEND)
1178 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1179 0)))
1180 > GET_MODE_SIZE (mode)
1181 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1182 mode,
1183 XEXP (op, 0), mode);
1185 /* (float_truncate (float x)) is (float x) */
1186 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1187 && (flag_unsafe_math_optimizations
1188 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1189 && ((unsigned)significand_size (GET_MODE (op))
1190 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1191 - num_sign_bit_copies (XEXP (op, 0),
1192 GET_MODE (XEXP (op, 0))))))))
1193 return simplify_gen_unary (GET_CODE (op), mode,
1194 XEXP (op, 0),
1195 GET_MODE (XEXP (op, 0)));
1197 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1198 (OP:SF foo:SF) if OP is NEG or ABS. */
1199 if ((GET_CODE (op) == ABS
1200 || GET_CODE (op) == NEG)
1201 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1202 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1203 return simplify_gen_unary (GET_CODE (op), mode,
1204 XEXP (XEXP (op, 0), 0), mode);
1206 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1207 is (float_truncate:SF x). */
1208 if (GET_CODE (op) == SUBREG
1209 && subreg_lowpart_p (op)
1210 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1211 return SUBREG_REG (op);
1212 break;
1214 case FLOAT_EXTEND:
1215 if (DECIMAL_FLOAT_MODE_P (mode))
1216 break;
1218 /* (float_extend (float_extend x)) is (float_extend x)
1220 (float_extend (float x)) is (float x) assuming that double
1221 rounding can't happen.  */
1223 if (GET_CODE (op) == FLOAT_EXTEND
1224 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1225 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1226 && ((unsigned)significand_size (GET_MODE (op))
1227 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1228 - num_sign_bit_copies (XEXP (op, 0),
1229 GET_MODE (XEXP (op, 0)))))))
1230 return simplify_gen_unary (GET_CODE (op), mode,
1231 XEXP (op, 0),
1232 GET_MODE (XEXP (op, 0)));
1234 break;
1236 case ABS:
1237 /* (abs (neg <foo>)) -> (abs <foo>) */
1238 if (GET_CODE (op) == NEG)
1239 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1240 GET_MODE (XEXP (op, 0)));
1242 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1243 do nothing. */
1244 if (GET_MODE (op) == VOIDmode)
1245 break;
1247 /* If operand is something known to be positive, ignore the ABS. */
1248 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1249 || val_signbit_known_clear_p (GET_MODE (op),
1250 nonzero_bits (op, GET_MODE (op))))
1251 return op;
1253 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1254 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1255 return gen_rtx_NEG (mode, op);
1257 break;
1259 case FFS:
1260 /* (ffs (*_extend <X>)) = (ffs <X>) */
1261 if (GET_CODE (op) == SIGN_EXTEND
1262 || GET_CODE (op) == ZERO_EXTEND)
1263 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1264 GET_MODE (XEXP (op, 0)));
1265 break;
1267 case POPCOUNT:
1268 switch (GET_CODE (op))
1270 case BSWAP:
1271 case ZERO_EXTEND:
1272 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1273 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1274 GET_MODE (XEXP (op, 0)));
1276 case ROTATE:
1277 case ROTATERT:
1278 /* Rotations don't affect popcount. */
1279 if (!side_effects_p (XEXP (op, 1)))
1280 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1281 GET_MODE (XEXP (op, 0)));
1282 break;
1284 default:
1285 break;
1287 break;
1289 case PARITY:
1290 switch (GET_CODE (op))
1292 case NOT:
1293 case BSWAP:
1294 case ZERO_EXTEND:
1295 case SIGN_EXTEND:
1296 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1297 GET_MODE (XEXP (op, 0)));
1299 case ROTATE:
1300 case ROTATERT:
1301 /* Rotations don't affect parity. */
1302 if (!side_effects_p (XEXP (op, 1)))
1303 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1304 GET_MODE (XEXP (op, 0)));
1305 break;
1307 default:
1308 break;
1310 break;
1312 case BSWAP:
1313 /* (bswap (bswap x)) -> x. */
1314 if (GET_CODE (op) == BSWAP)
1315 return XEXP (op, 0);
1316 break;
1318 case FLOAT:
1319 /* (float (sign_extend <X>)) = (float <X>). */
1320 if (GET_CODE (op) == SIGN_EXTEND)
1321 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1322 GET_MODE (XEXP (op, 0)));
1323 break;
1325 case SIGN_EXTEND:
1326 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1327 becomes just the MINUS if its mode is MODE. This allows
1328 folding switch statements on machines using casesi (such as
1329 the VAX). */
1330 if (GET_CODE (op) == TRUNCATE
1331 && GET_MODE (XEXP (op, 0)) == mode
1332 && GET_CODE (XEXP (op, 0)) == MINUS
1333 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1334 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1335 return XEXP (op, 0);
1337 /* Extending a widening multiplication should be canonicalized to
1338 a wider widening multiplication. */
1339 if (GET_CODE (op) == MULT)
1341 rtx lhs = XEXP (op, 0);
1342 rtx rhs = XEXP (op, 1);
1343 enum rtx_code lcode = GET_CODE (lhs);
1344 enum rtx_code rcode = GET_CODE (rhs);
1346 /* Widening multiplies usually extend both operands, but sometimes
1347 they use a shift to extract a portion of a register. */
1348 if ((lcode == SIGN_EXTEND
1349 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1350 && (rcode == SIGN_EXTEND
1351 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1353 machine_mode lmode = GET_MODE (lhs);
1354 machine_mode rmode = GET_MODE (rhs);
1355 int bits;
1357 if (lcode == ASHIFTRT)
1358 /* Number of bits not shifted off the end. */
1359 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1360 else /* lcode == SIGN_EXTEND */
1361 /* Size of inner mode. */
1362 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1364 if (rcode == ASHIFTRT)
1365 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1366 else /* rcode == SIGN_EXTEND */
1367 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1369 /* We can only widen multiplies if the result is mathematically
1370 equivalent. I.e. if overflow was impossible. */
1371 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1372 return simplify_gen_binary
1373 (MULT, mode,
1374 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1375 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1379 /* Check for a sign extension of a subreg of a promoted
1380 variable, where the promotion is sign-extended, and the
1381 target mode is the same as the variable's promotion. */
1382 if (GET_CODE (op) == SUBREG
1383 && SUBREG_PROMOTED_VAR_P (op)
1384 && SUBREG_PROMOTED_SIGNED_P (op)
1385 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1387 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1388 if (temp)
1389 return temp;
1392 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1393 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1394 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1396 gcc_assert (GET_MODE_PRECISION (mode)
1397 > GET_MODE_PRECISION (GET_MODE (op)));
1398 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1399 GET_MODE (XEXP (op, 0)));
1402 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1403 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1404 GET_MODE_BITSIZE (N) - I bits.
1405 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1406 is similarly (zero_extend:M (subreg:O <X>)). */
1407 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1408 && GET_CODE (XEXP (op, 0)) == ASHIFT
1409 && CONST_INT_P (XEXP (op, 1))
1410 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1411 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1413 machine_mode tmode
1414 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1415 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1416 gcc_assert (GET_MODE_BITSIZE (mode)
1417 > GET_MODE_BITSIZE (GET_MODE (op)));
1418 if (tmode != BLKmode)
1420 rtx inner =
1421 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1422 if (inner)
1423 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1424 ? SIGN_EXTEND : ZERO_EXTEND,
1425 mode, inner, tmode);
1429 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1430 /* As we do not know which address space the pointer is referring to,
1431 we can do this only if the target does not support different pointer
1432 or address modes depending on the address space. */
1433 if (target_default_pointer_address_modes_p ()
1434 && ! POINTERS_EXTEND_UNSIGNED
1435 && mode == Pmode && GET_MODE (op) == ptr_mode
1436 && (CONSTANT_P (op)
1437 || (GET_CODE (op) == SUBREG
1438 && REG_P (SUBREG_REG (op))
1439 && REG_POINTER (SUBREG_REG (op))
1440 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1441 return convert_memory_address (Pmode, op);
1442 #endif
1443 break;
1445 case ZERO_EXTEND:
1446 /* Check for a zero extension of a subreg of a promoted
1447 variable, where the promotion is zero-extended, and the
1448 target mode is the same as the variable's promotion. */
1449 if (GET_CODE (op) == SUBREG
1450 && SUBREG_PROMOTED_VAR_P (op)
1451 && SUBREG_PROMOTED_UNSIGNED_P (op)
1452 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1454 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1455 if (temp)
1456 return temp;
1459 /* Extending a widening multiplication should be canonicalized to
1460 a wider widening multiplication. */
1461 if (GET_CODE (op) == MULT)
1463 rtx lhs = XEXP (op, 0);
1464 rtx rhs = XEXP (op, 1);
1465 enum rtx_code lcode = GET_CODE (lhs);
1466 enum rtx_code rcode = GET_CODE (rhs);
1468 /* Widening multiplies usually extend both operands, but sometimes
1469 they use a shift to extract a portion of a register. */
1470 if ((lcode == ZERO_EXTEND
1471 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1472 && (rcode == ZERO_EXTEND
1473 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1475 machine_mode lmode = GET_MODE (lhs);
1476 machine_mode rmode = GET_MODE (rhs);
1477 int bits;
1479 if (lcode == LSHIFTRT)
1480 /* Number of bits not shifted off the end. */
1481 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1482 else /* lcode == ZERO_EXTEND */
1483 /* Size of inner mode. */
1484 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1486 if (rcode == LSHIFTRT)
1487 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1488 else /* rcode == ZERO_EXTEND */
1489 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1491 /* We can only widen multiplies if the result is mathematically
1492 equivalent. I.e. if overflow was impossible. */
1493 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1494 return simplify_gen_binary
1495 (MULT, mode,
1496 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1497 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1501 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1502 if (GET_CODE (op) == ZERO_EXTEND)
1503 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1504 GET_MODE (XEXP (op, 0)));
1506 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1507 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1508 GET_MODE_PRECISION (N) - I bits. */
1509 if (GET_CODE (op) == LSHIFTRT
1510 && GET_CODE (XEXP (op, 0)) == ASHIFT
1511 && CONST_INT_P (XEXP (op, 1))
1512 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1513 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1515 machine_mode tmode
1516 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1517 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1518 if (tmode != BLKmode)
1520 rtx inner =
1521 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1522 if (inner)
1523 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1527 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1528 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1529 of mode N. E.g.
1530 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1531 (and:SI (reg:SI) (const_int 63)). */
1532 if (GET_CODE (op) == SUBREG
1533 && GET_MODE_PRECISION (GET_MODE (op))
1534 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1535 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1536 <= HOST_BITS_PER_WIDE_INT
1537 && GET_MODE_PRECISION (mode)
1538 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1539 && subreg_lowpart_p (op)
1540 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1541 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1543 if (GET_MODE_PRECISION (mode)
1544 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1545 return SUBREG_REG (op);
1546 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1547 GET_MODE (SUBREG_REG (op)));
1550 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1551 /* As we do not know which address space the pointer is referring to,
1552 we can do this only if the target does not support different pointer
1553 or address modes depending on the address space. */
1554 if (target_default_pointer_address_modes_p ()
1555 && POINTERS_EXTEND_UNSIGNED > 0
1556 && mode == Pmode && GET_MODE (op) == ptr_mode
1557 && (CONSTANT_P (op)
1558 || (GET_CODE (op) == SUBREG
1559 && REG_P (SUBREG_REG (op))
1560 && REG_POINTER (SUBREG_REG (op))
1561 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1562 return convert_memory_address (Pmode, op);
1563 #endif
1564 break;
1566 default:
1567 break;
1570 return 0;
1573 /* Try to compute the value of a unary operation CODE whose output mode is to
1574 be MODE with input operand OP whose mode was originally OP_MODE.
1575 Return zero if the value cannot be computed. */
1577 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1578 rtx op, machine_mode op_mode)
1580 unsigned int width = GET_MODE_PRECISION (mode);
1582 if (code == VEC_DUPLICATE)
1584 gcc_assert (VECTOR_MODE_P (mode));
1585 if (GET_MODE (op) != VOIDmode)
1587 if (!VECTOR_MODE_P (GET_MODE (op)))
1588 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1589 else
1590 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1591 (GET_MODE (op)));
1593 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1594 || GET_CODE (op) == CONST_VECTOR)
1596 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1597 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1598 rtvec v = rtvec_alloc (n_elts);
1599 unsigned int i;
1601 if (GET_CODE (op) != CONST_VECTOR)
1602 for (i = 0; i < n_elts; i++)
1603 RTVEC_ELT (v, i) = op;
1604 else
1606 machine_mode inmode = GET_MODE (op);
1607 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1608 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1610 gcc_assert (in_n_elts < n_elts);
1611 gcc_assert ((n_elts % in_n_elts) == 0);
1612 for (i = 0; i < n_elts; i++)
1613 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1615 return gen_rtx_CONST_VECTOR (mode, v);
1619 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1621 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1622 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1623 machine_mode opmode = GET_MODE (op);
1624 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1625 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1626 rtvec v = rtvec_alloc (n_elts);
1627 unsigned int i;
1629 gcc_assert (op_n_elts == n_elts);
1630 for (i = 0; i < n_elts; i++)
1632 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1633 CONST_VECTOR_ELT (op, i),
1634 GET_MODE_INNER (opmode));
1635 if (!x)
1636 return 0;
1637 RTVEC_ELT (v, i) = x;
1639 return gen_rtx_CONST_VECTOR (mode, v);
1642 /* The order of these tests is critical so that, for example, we don't
1643 check the wrong mode (input vs. output) for a conversion operation,
1644 such as FIX. At some point, this should be simplified. */
1646 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1648 REAL_VALUE_TYPE d;
1650 if (op_mode == VOIDmode)
1652 /* CONST_INTs have VOIDmode as their mode. We assume that all
1653 the bits of the constant are significant, though this is
1654 a dangerous assumption: CONST_INTs are often
1655 created and used with garbage in the bits outside of the
1656 precision of the implied mode of the const_int. */
1657 op_mode = MAX_MODE_INT;
1660 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1661 d = real_value_truncate (mode, d);
1662 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1664 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1666 REAL_VALUE_TYPE d;
1668 if (op_mode == VOIDmode)
1670 /* CONST_INTs have VOIDmode as their mode. We assume that all
1671 the bits of the constant are significant, though this is
1672 a dangerous assumption: CONST_INTs are often
1673 created and used with garbage in the bits outside of the
1674 precision of the implied mode of the const_int. */
1675 op_mode = MAX_MODE_INT;
1678 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1679 d = real_value_truncate (mode, d);
1680 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1683 if (CONST_SCALAR_INT_P (op) && width > 0)
1685 wide_int result;
1686 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1687 rtx_mode_t op0 = std::make_pair (op, imode);
1688 int int_value;
1690 #if TARGET_SUPPORTS_WIDE_INT == 0
1691 /* This assert keeps the simplification from producing a result
1692 that cannot be represented in a CONST_DOUBLE. A lot of
1693 upstream callers expect that this function never fails to
1694 simplify something, so if you added this check to the test
1695 above, the code would die later anyway. If this assert
1696 fires, you just need to make the port support wide int. */
1697 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1698 #endif
1700 switch (code)
1702 case NOT:
1703 result = wi::bit_not (op0);
1704 break;
1706 case NEG:
1707 result = wi::neg (op0);
1708 break;
1710 case ABS:
1711 result = wi::abs (op0);
1712 break;
1714 case FFS:
1715 result = wi::shwi (wi::ffs (op0), mode);
1716 break;
1718 case CLZ:
1719 if (wi::ne_p (op0, 0))
1720 int_value = wi::clz (op0);
1721 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1722 int_value = GET_MODE_PRECISION (mode);
1723 result = wi::shwi (int_value, mode);
1724 break;
1726 case CLRSB:
1727 result = wi::shwi (wi::clrsb (op0), mode);
1728 break;
1730 case CTZ:
1731 if (wi::ne_p (op0, 0))
1732 int_value = wi::ctz (op0);
1733 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1734 int_value = GET_MODE_PRECISION (mode);
1735 result = wi::shwi (int_value, mode);
1736 break;
1738 case POPCOUNT:
1739 result = wi::shwi (wi::popcount (op0), mode);
1740 break;
1742 case PARITY:
1743 result = wi::shwi (wi::parity (op0), mode);
1744 break;
1746 case BSWAP:
1747 result = wide_int (op0).bswap ();
1748 break;
1750 case TRUNCATE:
1751 case ZERO_EXTEND:
1752 result = wide_int::from (op0, width, UNSIGNED);
1753 break;
1755 case SIGN_EXTEND:
1756 result = wide_int::from (op0, width, SIGNED);
1757 break;
1759 case SQRT:
1760 default:
1761 return 0;
1764 return immed_wide_int_const (result, mode);
1767 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1768 && SCALAR_FLOAT_MODE_P (mode)
1769 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1771 REAL_VALUE_TYPE d;
1772 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1774 switch (code)
1776 case SQRT:
1777 return 0;
1778 case ABS:
1779 d = real_value_abs (&d);
1780 break;
1781 case NEG:
1782 d = real_value_negate (&d);
1783 break;
1784 case FLOAT_TRUNCATE:
1785 d = real_value_truncate (mode, d);
1786 break;
1787 case FLOAT_EXTEND:
1788 /* All this does is change the mode, unless changing
1789 mode class. */
1790 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1791 real_convert (&d, mode, &d);
1792 break;
1793 case FIX:
1794 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1795 break;
1796 case NOT:
1798 long tmp[4];
1799 int i;
1801 real_to_target (tmp, &d, GET_MODE (op));
1802 for (i = 0; i < 4; i++)
1803 tmp[i] = ~tmp[i];
1804 real_from_target (&d, tmp, mode);
1805 break;
1807 default:
1808 gcc_unreachable ();
1810 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1812 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1813 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1814 && GET_MODE_CLASS (mode) == MODE_INT
1815 && width > 0)
1817 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1818 operators are intentionally left unspecified (to ease implementation
1819 by target backends), for consistency, this routine implements the
1820 same semantics for constant folding as used by the middle-end. */
1822 /* This was formerly used only for non-IEEE float.
1823 eggert@twinsun.com says it is safe for IEEE also. */
1824 REAL_VALUE_TYPE x, t;
1825 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1826 wide_int wmax, wmin;
1827 /* This is part of the ABI of real_to_integer, but we check
1828 things before making this call. */
1829 bool fail;
1831 switch (code)
1833 case FIX:
1834 if (REAL_VALUE_ISNAN (x))
1835 return const0_rtx;
1837 /* Test against the signed upper bound. */
1838 wmax = wi::max_value (width, SIGNED);
1839 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1840 if (REAL_VALUES_LESS (t, x))
1841 return immed_wide_int_const (wmax, mode);
1843 /* Test against the signed lower bound. */
1844 wmin = wi::min_value (width, SIGNED);
1845 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1846 if (REAL_VALUES_LESS (x, t))
1847 return immed_wide_int_const (wmin, mode);
1849 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1850 break;
1852 case UNSIGNED_FIX:
1853 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1854 return const0_rtx;
1856 /* Test against the unsigned upper bound. */
1857 wmax = wi::max_value (width, UNSIGNED);
1858 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1859 if (REAL_VALUES_LESS (t, x))
1860 return immed_wide_int_const (wmax, mode);
1862 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1863 mode);
1864 break;
1866 default:
1867 gcc_unreachable ();
1871 return NULL_RTX;
1874 /* Subroutine of simplify_binary_operation to simplify a binary operation
1875 CODE that can commute with byte swapping, with result mode MODE and
1876 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1877 Return zero if no simplification or canonicalization is possible. */
1879 static rtx
1880 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1881 rtx op0, rtx op1)
1883 rtx tem;
1885 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 = (bswap C1). */
1886 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1888 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1889 simplify_gen_unary (BSWAP, mode, op1, mode));
1890 return simplify_gen_unary (BSWAP, mode, tem, mode);
1893 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1894 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1896 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1897 return simplify_gen_unary (BSWAP, mode, tem, mode);
1900 return NULL_RTX;
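/* Illustrative sketch (not part of the original file): a minimal example
   of the bswap-commuting rule above, assuming a 32-bit SImode value X.
   The constant is byte-swapped so the operation can be done before the
   BSWAP:
     (and:SI (bswap:SI X) (const_int 0xff))
       -> (bswap:SI (and:SI X (const_int 0xff000000)))
   The #if 0 block is deliberately not compiled; `x' stands for a
   hypothetical SImode rtx.  */
#if 0
static rtx
example_fold_bswap_and (rtx x)
{
  rtx swapped = simplify_gen_unary (BSWAP, SImode, x, SImode);
  /* simplify_byte_swapping_operation is reached through
     simplify_binary_operation_1 for AND, IOR and XOR.  */
  return simplify_gen_binary (AND, SImode, swapped, GEN_INT (0xff));
}
#endif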
1903 /* Subroutine of simplify_binary_operation to simplify a commutative,
1904 associative binary operation CODE with result mode MODE, operating
1905 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1906 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1907 canonicalization is possible. */
1909 static rtx
1910 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1911 rtx op0, rtx op1)
1913 rtx tem;
1915 /* Linearize the operator to the left. */
1916 if (GET_CODE (op1) == code)
1918 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1919 if (GET_CODE (op0) == code)
1921 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1922 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1925 /* "a op (b op c)" becomes "(b op c) op a". */
1926 if (! swap_commutative_operands_p (op1, op0))
1927 return simplify_gen_binary (code, mode, op1, op0);
1929 std::swap (op0, op1);
1932 if (GET_CODE (op0) == code)
1934 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1935 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1937 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1938 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1941 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1942 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1943 if (tem != 0)
1944 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1946 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1947 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1948 if (tem != 0)
1949 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1952 return 0;
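/* Illustrative sketch (not part of the original file): the left
   linearization performed above, shown for MULT.  Given four SImode
   operands a, b, c, d:
     (mult (mult a b) (mult c d)) -> (mult (mult (mult a b) c) d)
   i.e. the chain is rebuilt so that it associates to the left, giving
   later passes a canonical shape to match.  The #if 0 block is
   deliberately not compiled; the names are hypothetical.  */
#if 0
static rtx
example_linearize_mult (rtx a, rtx b, rtx c, rtx d)
{
  rtx ab = simplify_gen_binary (MULT, SImode, a, b);
  rtx cd = simplify_gen_binary (MULT, SImode, c, d);
  /* simplify_associative_operation is invoked from the MULT handling
     in simplify_binary_operation_1.  */
  return simplify_gen_binary (MULT, SImode, ab, cd);
}
#endif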
1956 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1957 and OP1. Return 0 if no simplification is possible.
1959 Don't use this for relational operations such as EQ or LT.
1960 Use simplify_relational_operation instead. */
1961 rtx
1962 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1963 rtx op0, rtx op1)
1965 rtx trueop0, trueop1;
1966 rtx tem;
1968 /* Relational operations don't work here. We must know the mode
1969 of the operands in order to do the comparison correctly.
1970 Assuming a full word can give incorrect results.
1971 Consider comparing 128 with -128 in QImode. */
1972 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1973 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1975 /* Make sure the constant is second. */
1976 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1977 && swap_commutative_operands_p (op0, op1))
1978 std::swap (op0, op1);
1980 trueop0 = avoid_constant_pool_reference (op0);
1981 trueop1 = avoid_constant_pool_reference (op1);
1983 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1984 if (tem)
1985 return tem;
1986 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
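/* Illustrative sketch (not part of the original file): how a caller
   might use the routine above.  Constant operands are expected to be
   folded outright through simplify_const_binary_operation; anything
   else falls through to the pattern-based rules in
   simplify_binary_operation_1.  Deliberately not compiled.  */
#if 0
static void
example_fold_constant_plus (void)
{
  rtx sum = simplify_binary_operation (PLUS, SImode,
                                       GEN_INT (2), GEN_INT (3));
  /* SUM is expected to be (const_int 5); the commutative-operand swap
     above also guarantees that a sole constant ends up second.  */
}
#endif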
1989 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1990 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1991 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1992 actual constants. */
1994 static rtx
1995 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1996 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1998 rtx tem, reversed, opleft, opright;
1999 HOST_WIDE_INT val;
2000 unsigned int width = GET_MODE_PRECISION (mode);
2002 /* Even if we can't compute a constant result,
2003 there are some cases worth simplifying. */
2005 switch (code)
2007 case PLUS:
2008 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2009 when x is NaN, infinite, or finite and nonzero. They aren't
2010 when x is -0 and the rounding mode is not towards -infinity,
2011 since (-0) + 0 is then 0. */
2012 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2013 return op0;
2015 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2016 transformations are safe even for IEEE. */
2017 if (GET_CODE (op0) == NEG)
2018 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2019 else if (GET_CODE (op1) == NEG)
2020 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2022 /* (~a) + 1 -> -a */
2023 if (INTEGRAL_MODE_P (mode)
2024 && GET_CODE (op0) == NOT
2025 && trueop1 == const1_rtx)
2026 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2028 /* Handle both-operands-constant cases. We can only add
2029 CONST_INTs to constants since the sum of relocatable symbols
2030 can't be handled by most assemblers. Don't add CONST_INT
2031 to CONST_INT since overflow won't be computed properly if wider
2032 than HOST_BITS_PER_WIDE_INT. */
2034 if ((GET_CODE (op0) == CONST
2035 || GET_CODE (op0) == SYMBOL_REF
2036 || GET_CODE (op0) == LABEL_REF)
2037 && CONST_INT_P (op1))
2038 return plus_constant (mode, op0, INTVAL (op1));
2039 else if ((GET_CODE (op1) == CONST
2040 || GET_CODE (op1) == SYMBOL_REF
2041 || GET_CODE (op1) == LABEL_REF)
2042 && CONST_INT_P (op0))
2043 return plus_constant (mode, op1, INTVAL (op0));
2045 /* See if this is something like X * C - X or vice versa or
2046 if the multiplication is written as a shift. If so, we can
2047 distribute and make a new multiply, shift, or maybe just
2048 have X (if C is 2 in the example above). But don't make
2049 something more expensive than we had before. */
2051 if (SCALAR_INT_MODE_P (mode))
2053 rtx lhs = op0, rhs = op1;
2055 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2056 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2058 if (GET_CODE (lhs) == NEG)
2060 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2061 lhs = XEXP (lhs, 0);
2063 else if (GET_CODE (lhs) == MULT
2064 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2066 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2067 lhs = XEXP (lhs, 0);
2069 else if (GET_CODE (lhs) == ASHIFT
2070 && CONST_INT_P (XEXP (lhs, 1))
2071 && INTVAL (XEXP (lhs, 1)) >= 0
2072 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2074 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2075 GET_MODE_PRECISION (mode));
2076 lhs = XEXP (lhs, 0);
2079 if (GET_CODE (rhs) == NEG)
2081 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2082 rhs = XEXP (rhs, 0);
2084 else if (GET_CODE (rhs) == MULT
2085 && CONST_INT_P (XEXP (rhs, 1)))
2087 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2088 rhs = XEXP (rhs, 0);
2090 else if (GET_CODE (rhs) == ASHIFT
2091 && CONST_INT_P (XEXP (rhs, 1))
2092 && INTVAL (XEXP (rhs, 1)) >= 0
2093 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2095 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2096 GET_MODE_PRECISION (mode));
2097 rhs = XEXP (rhs, 0);
2100 if (rtx_equal_p (lhs, rhs))
2102 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2103 rtx coeff;
2104 bool speed = optimize_function_for_speed_p (cfun);
2106 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2108 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2109 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2110 ? tem : 0;
2114 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2115 if (CONST_SCALAR_INT_P (op1)
2116 && GET_CODE (op0) == XOR
2117 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2118 && mode_signbit_p (mode, op1))
2119 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2120 simplify_gen_binary (XOR, mode, op1,
2121 XEXP (op0, 1)));
2123 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2124 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2125 && GET_CODE (op0) == MULT
2126 && GET_CODE (XEXP (op0, 0)) == NEG)
2128 rtx in1, in2;
2130 in1 = XEXP (XEXP (op0, 0), 0);
2131 in2 = XEXP (op0, 1);
2132 return simplify_gen_binary (MINUS, mode, op1,
2133 simplify_gen_binary (MULT, mode,
2134 in1, in2));
2137 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2138 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2139 is 1. */
2140 if (COMPARISON_P (op0)
2141 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2142 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2143 && (reversed = reversed_comparison (op0, mode)))
2144 return
2145 simplify_gen_unary (NEG, mode, reversed, mode);
2147 /* If one of the operands is a PLUS or a MINUS, see if we can
2148 simplify this by the associative law.
2149 Don't use the associative law for floating point.
2150 The inaccuracy makes it nonassociative,
2151 and subtle programs can break if operations are associated. */
2153 if (INTEGRAL_MODE_P (mode)
2154 && (plus_minus_operand_p (op0)
2155 || plus_minus_operand_p (op1))
2156 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2157 return tem;
2159 /* Reassociate floating point addition only when the user
2160 specifies associative math operations. */
2161 if (FLOAT_MODE_P (mode)
2162 && flag_associative_math)
2164 tem = simplify_associative_operation (code, mode, op0, op1);
2165 if (tem)
2166 return tem;
2168 break;
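      /* Illustrative worked example (not part of the original file) for the
	 coefficient-merging rule above, assuming X is an SImode pseudo:
	   (plus (ashift X (const_int 2)) X) -> (mult X (const_int 5))
	 because the shift contributes a coefficient of 2^2 == 4 and the bare
	 X a coefficient of 1; the MULT form is kept only if it is no more
	 expensive than the original per set_src_cost.  Deliberately not
	 compiled; `x' is a hypothetical SImode rtx.  */
#if 0
      rtx dist = simplify_gen_binary (PLUS, SImode,
				      simplify_gen_binary (ASHIFT, SImode,
							   x, GEN_INT (2)),
				      x);
#endif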
2170 case COMPARE:
2171 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2172 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2173 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2174 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2176 rtx xop00 = XEXP (op0, 0);
2177 rtx xop10 = XEXP (op1, 0);
2179 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2180 return xop00;
2182 if (REG_P (xop00) && REG_P (xop10)
2183 && GET_MODE (xop00) == GET_MODE (xop10)
2184 && REGNO (xop00) == REGNO (xop10)
2185 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2186 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2187 return xop00;
2189 break;
2191 case MINUS:
2192 /* We can't assume x-x is 0 even with non-IEEE floating point,
2193 but since it is zero except in very strange circumstances, we
2194 will treat it as zero with -ffinite-math-only. */
2195 if (rtx_equal_p (trueop0, trueop1)
2196 && ! side_effects_p (op0)
2197 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2198 return CONST0_RTX (mode);
2200 /* Change subtraction from zero into negation. (0 - x) is the
2201 same as -x when x is NaN, infinite, or finite and nonzero.
2202 But if the mode has signed zeros, and does not round towards
2203 -infinity, then 0 - 0 is 0, not -0. */
2204 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2205 return simplify_gen_unary (NEG, mode, op1, mode);
2207 /* (-1 - a) is ~a. */
2208 if (trueop0 == constm1_rtx)
2209 return simplify_gen_unary (NOT, mode, op1, mode);
2211 /* Subtracting 0 has no effect unless the mode has signed zeros
2212 and supports rounding towards -infinity. In such a case,
2213 0 - 0 is -0. */
2214 if (!(HONOR_SIGNED_ZEROS (mode)
2215 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2216 && trueop1 == CONST0_RTX (mode))
2217 return op0;
2219 /* See if this is something like X * C - X or vice versa or
2220 if the multiplication is written as a shift. If so, we can
2221 distribute and make a new multiply, shift, or maybe just
2222 have X (if C is 2 in the example above). But don't make
2223 something more expensive than we had before. */
2225 if (SCALAR_INT_MODE_P (mode))
2227 rtx lhs = op0, rhs = op1;
2229 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2230 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2232 if (GET_CODE (lhs) == NEG)
2234 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2235 lhs = XEXP (lhs, 0);
2237 else if (GET_CODE (lhs) == MULT
2238 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2240 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2241 lhs = XEXP (lhs, 0);
2243 else if (GET_CODE (lhs) == ASHIFT
2244 && CONST_INT_P (XEXP (lhs, 1))
2245 && INTVAL (XEXP (lhs, 1)) >= 0
2246 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2248 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2249 GET_MODE_PRECISION (mode));
2250 lhs = XEXP (lhs, 0);
2253 if (GET_CODE (rhs) == NEG)
2255 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2256 rhs = XEXP (rhs, 0);
2258 else if (GET_CODE (rhs) == MULT
2259 && CONST_INT_P (XEXP (rhs, 1)))
2261 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2262 rhs = XEXP (rhs, 0);
2264 else if (GET_CODE (rhs) == ASHIFT
2265 && CONST_INT_P (XEXP (rhs, 1))
2266 && INTVAL (XEXP (rhs, 1)) >= 0
2267 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2269 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2270 GET_MODE_PRECISION (mode));
2271 negcoeff1 = -negcoeff1;
2272 rhs = XEXP (rhs, 0);
2275 if (rtx_equal_p (lhs, rhs))
2277 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2278 rtx coeff;
2279 bool speed = optimize_function_for_speed_p (cfun);
2281 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2283 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2284 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2285 ? tem : 0;
2289 /* (a - (-b)) -> (a + b). True even for IEEE. */
2290 if (GET_CODE (op1) == NEG)
2291 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2293 /* (-x - c) may be simplified as (-c - x). */
2294 if (GET_CODE (op0) == NEG
2295 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2297 tem = simplify_unary_operation (NEG, mode, op1, mode);
2298 if (tem)
2299 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2302 /* Don't let a relocatable value get a negative coeff. */
2303 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2304 return simplify_gen_binary (PLUS, mode,
2305 op0,
2306 neg_const_int (mode, op1));
2308 /* (x - (x & y)) -> (x & ~y) */
2309 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2311 if (rtx_equal_p (op0, XEXP (op1, 0)))
2313 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2314 GET_MODE (XEXP (op1, 1)));
2315 return simplify_gen_binary (AND, mode, op0, tem);
2317 if (rtx_equal_p (op0, XEXP (op1, 1)))
2319 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2320 GET_MODE (XEXP (op1, 0)));
2321 return simplify_gen_binary (AND, mode, op0, tem);
2325 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2326 by reversing the comparison code if valid. */
2327 if (STORE_FLAG_VALUE == 1
2328 && trueop0 == const1_rtx
2329 && COMPARISON_P (op1)
2330 && (reversed = reversed_comparison (op1, mode)))
2331 return reversed;
2333 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2335 && GET_CODE (op1) == MULT
2336 && GET_CODE (XEXP (op1, 0)) == NEG)
2338 rtx in1, in2;
2340 in1 = XEXP (XEXP (op1, 0), 0);
2341 in2 = XEXP (op1, 1);
2342 return simplify_gen_binary (PLUS, mode,
2343 simplify_gen_binary (MULT, mode,
2344 in1, in2),
2345 op0);
2348 /* Canonicalize (minus (neg A) (mult B C)) to
2349 (minus (mult (neg B) C) A). */
2350 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2351 && GET_CODE (op1) == MULT
2352 && GET_CODE (op0) == NEG)
2354 rtx in1, in2;
2356 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2357 in2 = XEXP (op1, 1);
2358 return simplify_gen_binary (MINUS, mode,
2359 simplify_gen_binary (MULT, mode,
2360 in1, in2),
2361 XEXP (op0, 0));
2364 /* If one of the operands is a PLUS or a MINUS, see if we can
2365 simplify this by the associative law. This will, for example,
2366 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2367 Don't use the associative law for floating point.
2368 The inaccuracy makes it nonassociative,
2369 and subtle programs can break if operations are associated. */
2371 if (INTEGRAL_MODE_P (mode)
2372 && (plus_minus_operand_p (op0)
2373 || plus_minus_operand_p (op1))
2374 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2375 return tem;
2376 break;
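      /* Illustrative worked example (not part of the original file) for the
	 (x - (x & y)) -> (x & ~y) rule above.  With x = 0b1100 and
	 y = 0b1010: x & y = 0b1000, so x - (x & y) = 0b0100, which equals
	 x & ~y = 0b1100 & 0b0101 = 0b0100.  Subtracting the common bits is
	 the same as keeping only the bits of x that y does not have.  */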
2378 case MULT:
2379 if (trueop1 == constm1_rtx)
2380 return simplify_gen_unary (NEG, mode, op0, mode);
2382 if (GET_CODE (op0) == NEG)
2384 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2385 /* If op1 is a MULT as well and simplify_unary_operation
2386 just moved the NEG to the second operand, simplify_gen_binary
2387 below could, through simplify_associative_operation, move
2388 the NEG around again and recurse endlessly. */
2389 if (temp
2390 && GET_CODE (op1) == MULT
2391 && GET_CODE (temp) == MULT
2392 && XEXP (op1, 0) == XEXP (temp, 0)
2393 && GET_CODE (XEXP (temp, 1)) == NEG
2394 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2395 temp = NULL_RTX;
2396 if (temp)
2397 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2399 if (GET_CODE (op1) == NEG)
2401 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2402 /* If op0 is a MULT as well and simplify_unary_operation
2403 just moved the NEG to the second operand, simplify_gen_binary
2404 below could, through simplify_associative_operation, move
2405 the NEG around again and recurse endlessly. */
2406 if (temp
2407 && GET_CODE (op0) == MULT
2408 && GET_CODE (temp) == MULT
2409 && XEXP (op0, 0) == XEXP (temp, 0)
2410 && GET_CODE (XEXP (temp, 1)) == NEG
2411 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2412 temp = NULL_RTX;
2413 if (temp)
2414 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2417 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2418 x is NaN, since x * 0 is then also NaN. Nor is it valid
2419 when the mode has signed zeros, since multiplying a negative
2420 number by 0 will give -0, not 0. */
2421 if (!HONOR_NANS (mode)
2422 && !HONOR_SIGNED_ZEROS (mode)
2423 && trueop1 == CONST0_RTX (mode)
2424 && ! side_effects_p (op0))
2425 return op1;
2427 /* In IEEE floating point, x*1 is not equivalent to x for
2428 signalling NaNs. */
2429 if (!HONOR_SNANS (mode)
2430 && trueop1 == CONST1_RTX (mode))
2431 return op0;
2433 /* Convert multiply by constant power of two into shift. */
2434 if (CONST_SCALAR_INT_P (trueop1))
2436 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2437 if (val >= 0)
2438 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2441 /* x*2 is x+x and x*(-1) is -x */
2442 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2443 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2444 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2445 && GET_MODE (op0) == mode)
2447 REAL_VALUE_TYPE d;
2448 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2450 if (REAL_VALUES_EQUAL (d, dconst2))
2451 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2453 if (!HONOR_SNANS (mode)
2454 && REAL_VALUES_EQUAL (d, dconstm1))
2455 return simplify_gen_unary (NEG, mode, op0, mode);
2458 /* Optimize -x * -x as x * x. */
2459 if (FLOAT_MODE_P (mode)
2460 && GET_CODE (op0) == NEG
2461 && GET_CODE (op1) == NEG
2462 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2463 && !side_effects_p (XEXP (op0, 0)))
2464 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2466 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2467 if (SCALAR_FLOAT_MODE_P (mode)
2468 && GET_CODE (op0) == ABS
2469 && GET_CODE (op1) == ABS
2470 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2471 && !side_effects_p (XEXP (op0, 0)))
2472 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2474 /* Reassociate multiplication, but for floating point MULTs
2475 only when the user specifies unsafe math optimizations. */
2476 if (! FLOAT_MODE_P (mode)
2477 || flag_unsafe_math_optimizations)
2479 tem = simplify_associative_operation (code, mode, op0, op1);
2480 if (tem)
2481 return tem;
2483 break;
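      /* Illustrative sketch (not part of the original file): the
	 power-of-two rule above turns a multiplication into a shift,
	 e.g. (mult X (const_int 8)) -> (ashift X (const_int 3)) since
	 wi::exact_log2 (8) == 3.  Deliberately not compiled; `x' is a
	 hypothetical SImode rtx.  */
#if 0
      rtx shifted = simplify_gen_binary (MULT, SImode, x, GEN_INT (8));
#endif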
2485 case IOR:
2486 if (trueop1 == CONST0_RTX (mode))
2487 return op0;
2488 if (INTEGRAL_MODE_P (mode)
2489 && trueop1 == CONSTM1_RTX (mode)
2490 && !side_effects_p (op0))
2491 return op1;
2492 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2493 return op0;
2494 /* A | (~A) -> -1 */
2495 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2496 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2497 && ! side_effects_p (op0)
2498 && SCALAR_INT_MODE_P (mode))
2499 return constm1_rtx;
2501 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2502 if (CONST_INT_P (op1)
2503 && HWI_COMPUTABLE_MODE_P (mode)
2504 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2505 && !side_effects_p (op0))
2506 return op1;
2508 /* Canonicalize (X & C1) | C2. */
2509 if (GET_CODE (op0) == AND
2510 && CONST_INT_P (trueop1)
2511 && CONST_INT_P (XEXP (op0, 1)))
2513 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2514 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2515 HOST_WIDE_INT c2 = INTVAL (trueop1);
2517 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2518 if ((c1 & c2) == c1
2519 && !side_effects_p (XEXP (op0, 0)))
2520 return trueop1;
2522 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2523 if (((c1|c2) & mask) == mask)
2524 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2526 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2527 if (((c1 & ~c2) & mask) != (c1 & mask))
2529 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2530 gen_int_mode (c1 & ~c2, mode));
2531 return simplify_gen_binary (IOR, mode, tem, op1);
2535 /* Convert (A & B) | A to A. */
2536 if (GET_CODE (op0) == AND
2537 && (rtx_equal_p (XEXP (op0, 0), op1)
2538 || rtx_equal_p (XEXP (op0, 1), op1))
2539 && ! side_effects_p (XEXP (op0, 0))
2540 && ! side_effects_p (XEXP (op0, 1)))
2541 return op1;
2543 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2544 mode size to (rotate A CX). */
2546 if (GET_CODE (op1) == ASHIFT
2547 || GET_CODE (op1) == SUBREG)
2549 opleft = op1;
2550 opright = op0;
2552 else
2554 opright = op1;
2555 opleft = op0;
2558 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2559 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2560 && CONST_INT_P (XEXP (opleft, 1))
2561 && CONST_INT_P (XEXP (opright, 1))
2562 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2563 == GET_MODE_PRECISION (mode)))
2564 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2566 /* Same, but for ashift that has been "simplified" to a wider mode
2567 by simplify_shift_const. */
2569 if (GET_CODE (opleft) == SUBREG
2570 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2571 && GET_CODE (opright) == LSHIFTRT
2572 && GET_CODE (XEXP (opright, 0)) == SUBREG
2573 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2574 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2575 && (GET_MODE_SIZE (GET_MODE (opleft))
2576 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2577 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2578 SUBREG_REG (XEXP (opright, 0)))
2579 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2580 && CONST_INT_P (XEXP (opright, 1))
2581 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2582 == GET_MODE_PRECISION (mode)))
2583 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2584 XEXP (SUBREG_REG (opleft), 1));
2586 /* If we have (ior (and X C1) C2), simplify this by making
2587 C1 as small as possible if C1 actually changes. */
2588 if (CONST_INT_P (op1)
2589 && (HWI_COMPUTABLE_MODE_P (mode)
2590 || INTVAL (op1) > 0)
2591 && GET_CODE (op0) == AND
2592 && CONST_INT_P (XEXP (op0, 1))
2593 && CONST_INT_P (op1)
2594 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2596 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2597 gen_int_mode (UINTVAL (XEXP (op0, 1))
2598 & ~UINTVAL (op1),
2599 mode));
2600 return simplify_gen_binary (IOR, mode, tmp, op1);
2603 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2604 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2605 the PLUS does not affect any of the bits in OP1: then we can do
2606 the IOR as a PLUS and we can associate. This is valid if OP1
2607 can be safely shifted left C bits. */
2608 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2609 && GET_CODE (XEXP (op0, 0)) == PLUS
2610 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2611 && CONST_INT_P (XEXP (op0, 1))
2612 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2614 int count = INTVAL (XEXP (op0, 1));
2615 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2617 if (mask >> count == INTVAL (trueop1)
2618 && trunc_int_for_mode (mask, mode) == mask
2619 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2620 return simplify_gen_binary (ASHIFTRT, mode,
2621 plus_constant (mode, XEXP (op0, 0),
2622 mask),
2623 XEXP (op0, 1));
2626 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2630 tem = simplify_associative_operation (code, mode, op0, op1);
2631 if (tem)
2632 return tem;
2633 break;
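      /* Illustrative worked example (not part of the original file) for the
	 rotate recognition above, assuming SImode (precision 32):
	   (ior (ashift X (const_int 3)) (lshiftrt X (const_int 29)))
	     -> (rotate X (const_int 3))
	 because the shift counts sum to the mode precision, so the two
	 shifted copies together reassemble every bit of X.  */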
2635 case XOR:
2636 if (trueop1 == CONST0_RTX (mode))
2637 return op0;
2638 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2639 return simplify_gen_unary (NOT, mode, op0, mode);
2640 if (rtx_equal_p (trueop0, trueop1)
2641 && ! side_effects_p (op0)
2642 && GET_MODE_CLASS (mode) != MODE_CC)
2643 return CONST0_RTX (mode);
2645 /* Canonicalize XOR of the most significant bit to PLUS. */
2646 if (CONST_SCALAR_INT_P (op1)
2647 && mode_signbit_p (mode, op1))
2648 return simplify_gen_binary (PLUS, mode, op0, op1);
2649 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2650 if (CONST_SCALAR_INT_P (op1)
2651 && GET_CODE (op0) == PLUS
2652 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2653 && mode_signbit_p (mode, XEXP (op0, 1)))
2654 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2655 simplify_gen_binary (XOR, mode, op1,
2656 XEXP (op0, 1)));
2658 /* If we are XORing two things that have no bits in common,
2659 convert them into an IOR. This helps to detect rotation encoded
2660 using those methods and possibly other simplifications. */
2662 if (HWI_COMPUTABLE_MODE_P (mode)
2663 && (nonzero_bits (op0, mode)
2664 & nonzero_bits (op1, mode)) == 0)
2665 return (simplify_gen_binary (IOR, mode, op0, op1));
2667 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2668 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2669 (NOT y). */
2671 int num_negated = 0;
2673 if (GET_CODE (op0) == NOT)
2674 num_negated++, op0 = XEXP (op0, 0);
2675 if (GET_CODE (op1) == NOT)
2676 num_negated++, op1 = XEXP (op1, 0);
2678 if (num_negated == 2)
2679 return simplify_gen_binary (XOR, mode, op0, op1);
2680 else if (num_negated == 1)
2681 return simplify_gen_unary (NOT, mode,
2682 simplify_gen_binary (XOR, mode, op0, op1),
2683 mode);
2686 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2687 correspond to a machine insn or result in further simplifications
2688 if B is a constant. */
2690 if (GET_CODE (op0) == AND
2691 && rtx_equal_p (XEXP (op0, 1), op1)
2692 && ! side_effects_p (op1))
2693 return simplify_gen_binary (AND, mode,
2694 simplify_gen_unary (NOT, mode,
2695 XEXP (op0, 0), mode),
2696 op1);
2698 else if (GET_CODE (op0) == AND
2699 && rtx_equal_p (XEXP (op0, 0), op1)
2700 && ! side_effects_p (op1))
2701 return simplify_gen_binary (AND, mode,
2702 simplify_gen_unary (NOT, mode,
2703 XEXP (op0, 1), mode),
2704 op1);
2706 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2707 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2708 out bits inverted twice and not set by C. Similarly, given
2709 (xor (and (xor A B) C) D), simplify without inverting C in
2710 the xor operand: (xor (and A C) (B&C)^D). */
2712 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2713 && GET_CODE (XEXP (op0, 0)) == XOR
2714 && CONST_INT_P (op1)
2715 && CONST_INT_P (XEXP (op0, 1))
2716 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2718 enum rtx_code op = GET_CODE (op0);
2719 rtx a = XEXP (XEXP (op0, 0), 0);
2720 rtx b = XEXP (XEXP (op0, 0), 1);
2721 rtx c = XEXP (op0, 1);
2722 rtx d = op1;
2723 HOST_WIDE_INT bval = INTVAL (b);
2724 HOST_WIDE_INT cval = INTVAL (c);
2725 HOST_WIDE_INT dval = INTVAL (d);
2726 HOST_WIDE_INT xcval;
2728 if (op == IOR)
2729 xcval = ~cval;
2730 else
2731 xcval = cval;
2733 return simplify_gen_binary (XOR, mode,
2734 simplify_gen_binary (op, mode, a, c),
2735 gen_int_mode ((bval & xcval) ^ dval,
2736 mode));
2739 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2740 we can transform like this:
2741 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2742 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2743 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2744 Attempt a few simplifications when B and C are both constants. */
2745 if (GET_CODE (op0) == AND
2746 && CONST_INT_P (op1)
2747 && CONST_INT_P (XEXP (op0, 1)))
2749 rtx a = XEXP (op0, 0);
2750 rtx b = XEXP (op0, 1);
2751 rtx c = op1;
2752 HOST_WIDE_INT bval = INTVAL (b);
2753 HOST_WIDE_INT cval = INTVAL (c);
2755 /* Instead of computing ~A&C, we compute its negated value,
2756 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2757 optimize for sure. If it does not simplify, we still try
2758 to compute ~A&C below, but since that always allocates
2759 RTL, we don't try that before committing to returning a
2760 simplified expression. */
2761 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2762 GEN_INT (~cval));
2764 if ((~cval & bval) == 0)
2766 rtx na_c = NULL_RTX;
2767 if (n_na_c)
2768 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2769 else
2771 /* If ~A does not simplify, don't bother: we don't
2772 want to simplify 2 operations into 3, and if na_c
2773 were to simplify with na, n_na_c would have
2774 simplified as well. */
2775 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2776 if (na)
2777 na_c = simplify_gen_binary (AND, mode, na, c);
2780 /* Try to simplify ~A&C | ~B&C. */
2781 if (na_c != NULL_RTX)
2782 return simplify_gen_binary (IOR, mode, na_c,
2783 gen_int_mode (~bval & cval, mode));
2785 else
2787 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2788 if (n_na_c == CONSTM1_RTX (mode))
2790 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2791 gen_int_mode (~cval & bval,
2792 mode));
2793 return simplify_gen_binary (IOR, mode, a_nc_b,
2794 gen_int_mode (~bval & cval,
2795 mode));
2800 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2801 comparison if STORE_FLAG_VALUE is 1. */
2802 if (STORE_FLAG_VALUE == 1
2803 && trueop1 == const1_rtx
2804 && COMPARISON_P (op0)
2805 && (reversed = reversed_comparison (op0, mode)))
2806 return reversed;
2808 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2809 is (lt foo (const_int 0)), so we can perform the above
2810 simplification if STORE_FLAG_VALUE is 1. */
2812 if (STORE_FLAG_VALUE == 1
2813 && trueop1 == const1_rtx
2814 && GET_CODE (op0) == LSHIFTRT
2815 && CONST_INT_P (XEXP (op0, 1))
2816 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2817 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2819 /* (xor (comparison foo bar) (const_int sign-bit))
2820 when STORE_FLAG_VALUE is the sign bit. */
2821 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2822 && trueop1 == const_true_rtx
2823 && COMPARISON_P (op0)
2824 && (reversed = reversed_comparison (op0, mode)))
2825 return reversed;
2827 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2828 if (tem)
2829 return tem;
2831 tem = simplify_associative_operation (code, mode, op0, op1);
2832 if (tem)
2833 return tem;
2834 break;
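      /* Illustrative worked example (not part of the original file) for the
	 disjoint-bits rule above: when nonzero_bits shows the operands share
	 no bits, e.g.
	   (xor (and X (const_int 0xf0)) (and Y (const_int 0x0f)))
	     -> (ior (and X (const_int 0xf0)) (and Y (const_int 0x0f)))
	 since XOR and IOR agree whenever no bit is set in both operands.  */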
2836 case AND:
2837 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2838 return trueop1;
2839 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2840 return op0;
2841 if (HWI_COMPUTABLE_MODE_P (mode))
2843 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2844 HOST_WIDE_INT nzop1;
2845 if (CONST_INT_P (trueop1))
2847 HOST_WIDE_INT val1 = INTVAL (trueop1);
2848 /* If we are turning off bits already known off in OP0, we need
2849 not do an AND. */
2850 if ((nzop0 & ~val1) == 0)
2851 return op0;
2853 nzop1 = nonzero_bits (trueop1, mode);
2854 /* If we are clearing all the nonzero bits, the result is zero. */
2855 if ((nzop1 & nzop0) == 0
2856 && !side_effects_p (op0) && !side_effects_p (op1))
2857 return CONST0_RTX (mode);
2859 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2860 && GET_MODE_CLASS (mode) != MODE_CC)
2861 return op0;
2862 /* A & (~A) -> 0 */
2863 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2864 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2865 && ! side_effects_p (op0)
2866 && GET_MODE_CLASS (mode) != MODE_CC)
2867 return CONST0_RTX (mode);
2869 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2870 there are no nonzero bits of C outside of X's mode. */
2871 if ((GET_CODE (op0) == SIGN_EXTEND
2872 || GET_CODE (op0) == ZERO_EXTEND)
2873 && CONST_INT_P (trueop1)
2874 && HWI_COMPUTABLE_MODE_P (mode)
2875 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2876 & UINTVAL (trueop1)) == 0)
2878 machine_mode imode = GET_MODE (XEXP (op0, 0));
2879 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2880 gen_int_mode (INTVAL (trueop1),
2881 imode));
2882 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2885 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2886 we might be able to further simplify the AND with X and potentially
2887 remove the truncation altogether. */
2888 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2890 rtx x = XEXP (op0, 0);
2891 machine_mode xmode = GET_MODE (x);
2892 tem = simplify_gen_binary (AND, xmode, x,
2893 gen_int_mode (INTVAL (trueop1), xmode));
2894 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2897 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2898 if (GET_CODE (op0) == IOR
2899 && CONST_INT_P (trueop1)
2900 && CONST_INT_P (XEXP (op0, 1)))
2902 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2903 return simplify_gen_binary (IOR, mode,
2904 simplify_gen_binary (AND, mode,
2905 XEXP (op0, 0), op1),
2906 gen_int_mode (tmp, mode));
2909 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2910 insn (and may simplify more). */
2911 if (GET_CODE (op0) == XOR
2912 && rtx_equal_p (XEXP (op0, 0), op1)
2913 && ! side_effects_p (op1))
2914 return simplify_gen_binary (AND, mode,
2915 simplify_gen_unary (NOT, mode,
2916 XEXP (op0, 1), mode),
2917 op1);
2919 if (GET_CODE (op0) == XOR
2920 && rtx_equal_p (XEXP (op0, 1), op1)
2921 && ! side_effects_p (op1))
2922 return simplify_gen_binary (AND, mode,
2923 simplify_gen_unary (NOT, mode,
2924 XEXP (op0, 0), mode),
2925 op1);
2927 /* Similarly for (~(A ^ B)) & A. */
2928 if (GET_CODE (op0) == NOT
2929 && GET_CODE (XEXP (op0, 0)) == XOR
2930 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2931 && ! side_effects_p (op1))
2932 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2934 if (GET_CODE (op0) == NOT
2935 && GET_CODE (XEXP (op0, 0)) == XOR
2936 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2937 && ! side_effects_p (op1))
2938 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2940 /* Convert (A | B) & A to A. */
2941 if (GET_CODE (op0) == IOR
2942 && (rtx_equal_p (XEXP (op0, 0), op1)
2943 || rtx_equal_p (XEXP (op0, 1), op1))
2944 && ! side_effects_p (XEXP (op0, 0))
2945 && ! side_effects_p (XEXP (op0, 1)))
2946 return op1;
2948 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2949 ((A & N) + B) & M -> (A + B) & M
2950 Similarly if (N & M) == 0,
2951 ((A | N) + B) & M -> (A + B) & M
2952 and for - instead of + and/or ^ instead of |.
2953 Also, if (N & M) == 0, then
2954 (A +- N) & M -> A & M. */
2955 if (CONST_INT_P (trueop1)
2956 && HWI_COMPUTABLE_MODE_P (mode)
2957 && ~UINTVAL (trueop1)
2958 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2959 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2961 rtx pmop[2];
2962 int which;
2964 pmop[0] = XEXP (op0, 0);
2965 pmop[1] = XEXP (op0, 1);
2967 if (CONST_INT_P (pmop[1])
2968 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2969 return simplify_gen_binary (AND, mode, pmop[0], op1);
2971 for (which = 0; which < 2; which++)
2973 tem = pmop[which];
2974 switch (GET_CODE (tem))
2976 case AND:
2977 if (CONST_INT_P (XEXP (tem, 1))
2978 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2979 == UINTVAL (trueop1))
2980 pmop[which] = XEXP (tem, 0);
2981 break;
2982 case IOR:
2983 case XOR:
2984 if (CONST_INT_P (XEXP (tem, 1))
2985 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2986 pmop[which] = XEXP (tem, 0);
2987 break;
2988 default:
2989 break;
2993 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2995 tem = simplify_gen_binary (GET_CODE (op0), mode,
2996 pmop[0], pmop[1]);
2997 return simplify_gen_binary (code, mode, tem, op1);
3001 /* (and X (ior (not X) Y)) -> (and X Y) */
3002 if (GET_CODE (op1) == IOR
3003 && GET_CODE (XEXP (op1, 0)) == NOT
3004 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3005 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3007 /* (and (ior (not X) Y) X) -> (and X Y) */
3008 if (GET_CODE (op0) == IOR
3009 && GET_CODE (XEXP (op0, 0)) == NOT
3010 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3011 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3013 /* (and X (ior Y (not X))) -> (and X Y) */
3014 if (GET_CODE (op1) == IOR
3015 && GET_CODE (XEXP (op1, 1)) == NOT
3016 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3017 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3019 /* (and (ior Y (not X)) X) -> (and X Y) */
3020 if (GET_CODE (op0) == IOR
3021 && GET_CODE (XEXP (op0, 1)) == NOT
3022 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3023 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3025 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3026 if (tem)
3027 return tem;
3029 tem = simplify_associative_operation (code, mode, op0, op1);
3030 if (tem)
3031 return tem;
3032 break;
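      /* Illustrative worked example (not part of the original file) for the
	 (xor A B) & A -> A & ~B rule above.  With A = 0b1100 and
	 B = 0b1010: (A ^ B) & A = 0b0110 & 0b1100 = 0b0100, and
	 A & ~B = 0b1100 & 0b0101 = 0b0100, so the two forms agree; the
	 latter often matches a single machine insn (an and-not).  */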
3034 case UDIV:
3035 /* 0/x is 0 (or x&0 if x has side-effects). */
3036 if (trueop0 == CONST0_RTX (mode))
3038 if (side_effects_p (op1))
3039 return simplify_gen_binary (AND, mode, op1, trueop0);
3040 return trueop0;
3042 /* x/1 is x. */
3043 if (trueop1 == CONST1_RTX (mode))
3045 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3046 if (tem)
3047 return tem;
3049 /* Convert divide by power of two into shift. */
3050 if (CONST_INT_P (trueop1)
3051 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3052 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3053 break;
3055 case DIV:
3056 /* Handle floating point and integers separately. */
3057 if (SCALAR_FLOAT_MODE_P (mode))
3059 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3060 safe for modes with NaNs, since 0.0 / 0.0 will then be
3061 NaN rather than 0.0. Nor is it safe for modes with signed
3062 zeros, since dividing 0 by a negative number gives -0.0 */
3063 if (trueop0 == CONST0_RTX (mode)
3064 && !HONOR_NANS (mode)
3065 && !HONOR_SIGNED_ZEROS (mode)
3066 && ! side_effects_p (op1))
3067 return op0;
3068 /* x/1.0 is x. */
3069 if (trueop1 == CONST1_RTX (mode)
3070 && !HONOR_SNANS (mode))
3071 return op0;
3073 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3074 && trueop1 != CONST0_RTX (mode))
3076 REAL_VALUE_TYPE d;
3077 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3079 /* x/-1.0 is -x. */
3080 if (REAL_VALUES_EQUAL (d, dconstm1)
3081 && !HONOR_SNANS (mode))
3082 return simplify_gen_unary (NEG, mode, op0, mode);
3084 /* Change FP division by a constant into multiplication.
3085 Only do this with -freciprocal-math. */
3086 if (flag_reciprocal_math
3087 && !REAL_VALUES_EQUAL (d, dconst0))
3089 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3090 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3091 return simplify_gen_binary (MULT, mode, op0, tem);
3095 else if (SCALAR_INT_MODE_P (mode))
3097 /* 0/x is 0 (or x&0 if x has side-effects). */
3098 if (trueop0 == CONST0_RTX (mode)
3099 && !cfun->can_throw_non_call_exceptions)
3101 if (side_effects_p (op1))
3102 return simplify_gen_binary (AND, mode, op1, trueop0);
3103 return trueop0;
3105 /* x/1 is x. */
3106 if (trueop1 == CONST1_RTX (mode))
3108 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3109 if (tem)
3110 return tem;
3112 /* x/-1 is -x. */
3113 if (trueop1 == constm1_rtx)
3115 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3116 if (x)
3117 return simplify_gen_unary (NEG, mode, x, mode);
3120 break;
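      /* Illustrative worked example (not part of the original file) for the
	 -freciprocal-math rule above: an FP division by a nonzero constant
	 becomes a multiplication by its reciprocal, e.g.
	   (div X (const_double 2.0)) -> (mult X (const_double 0.5)),
	 performed only when the user has allowed the (possibly less
	 accurate) reciprocal transformation.  */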
3122 case UMOD:
3123 /* 0%x is 0 (or x&0 if x has side-effects). */
3124 if (trueop0 == CONST0_RTX (mode))
3126 if (side_effects_p (op1))
3127 return simplify_gen_binary (AND, mode, op1, trueop0);
3128 return trueop0;
3130 /* x%1 is 0 (or x&0 if x has side-effects). */
3131 if (trueop1 == CONST1_RTX (mode))
3133 if (side_effects_p (op0))
3134 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3135 return CONST0_RTX (mode);
3137 /* Implement modulus by power of two as AND. */
3138 if (CONST_INT_P (trueop1)
3139 && exact_log2 (UINTVAL (trueop1)) > 0)
3140 return simplify_gen_binary (AND, mode, op0,
3141 gen_int_mode (INTVAL (op1) - 1, mode));
3142 break;
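      /* Illustrative sketch (not part of the original file): the
	 power-of-two rule above rewrites an unsigned modulus as a mask,
	 e.g. (umod X (const_int 8)) -> (and X (const_int 7)).
	 Deliberately not compiled; `x' is a hypothetical SImode rtx.  */
#if 0
      rtx masked = simplify_gen_binary (UMOD, SImode, x, GEN_INT (8));
#endif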
3144 case MOD:
3145 /* 0%x is 0 (or x&0 if x has side-effects). */
3146 if (trueop0 == CONST0_RTX (mode))
3148 if (side_effects_p (op1))
3149 return simplify_gen_binary (AND, mode, op1, trueop0);
3150 return trueop0;
3152 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3153 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3155 if (side_effects_p (op0))
3156 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3157 return CONST0_RTX (mode);
3159 break;
3161 case ROTATERT:
3162 case ROTATE:
3163 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3164 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3165 bitsize - 1, use the other direction of rotate with an amount of
3166 1 .. bitsize / 2 - 1 instead. */
3167 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3168 if (CONST_INT_P (trueop1)
3169 && IN_RANGE (INTVAL (trueop1),
3170 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3171 GET_MODE_PRECISION (mode) - 1))
3172 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3173 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3174 - INTVAL (trueop1)));
3175 #endif
3176 /* FALLTHRU */
3177 case ASHIFTRT:
3178 if (trueop1 == CONST0_RTX (mode))
3179 return op0;
3180 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3181 return op0;
3182 /* Rotating ~0 always results in ~0. */
3183 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3184 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3185 && ! side_effects_p (op1))
3186 return op0;
3187 /* Given:
3188 scalar modes M1, M2
3189 scalar constants c1, c2
3190 size (M2) > size (M1)
3191 c1 == size (M2) - size (M1)
3192 optimize:
3193 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3194 <low_part>)
3195 (const_int <c2>))
3197 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3198 <low_part>). */
3199 if (code == ASHIFTRT
3200 && !VECTOR_MODE_P (mode)
3201 && SUBREG_P (op0)
3202 && CONST_INT_P (op1)
3203 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3204 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3205 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3206 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3207 > GET_MODE_BITSIZE (mode))
3208 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3209 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3210 - GET_MODE_BITSIZE (mode)))
3211 && subreg_lowpart_p (op0))
3213 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3214 + INTVAL (op1));
3215 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3216 tmp = simplify_gen_binary (ASHIFTRT,
3217 GET_MODE (SUBREG_REG (op0)),
3218 XEXP (SUBREG_REG (op0), 0),
3219 tmp);
3220 return simplify_gen_subreg (mode, tmp, inner_mode,
3221 subreg_lowpart_offset (mode,
3222 inner_mode));
3224 canonicalize_shift:
3225 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3227 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3228 if (val != INTVAL (op1))
3229 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3231 break;
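      /* Illustrative worked example (not part of the original file) for the
	 canonicalize_shift code above, assuming the target defines
	 SHIFT_COUNT_TRUNCATED to a nonzero value and SImode (precision 32):
	   (ashiftrt X (const_int 37)) -> (ashiftrt X (const_int 5))
	 because 37 & 31 == 5, matching what the hardware shifter would do.  */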
3233 case ASHIFT:
3234 case SS_ASHIFT:
3235 case US_ASHIFT:
3236 if (trueop1 == CONST0_RTX (mode))
3237 return op0;
3238 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3239 return op0;
3240 goto canonicalize_shift;
3242 case LSHIFTRT:
3243 if (trueop1 == CONST0_RTX (mode))
3244 return op0;
3245 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3246 return op0;
3247 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3248 if (GET_CODE (op0) == CLZ
3249 && CONST_INT_P (trueop1)
3250 && STORE_FLAG_VALUE == 1
3251 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3253 machine_mode imode = GET_MODE (XEXP (op0, 0));
3254 unsigned HOST_WIDE_INT zero_val = 0;
3256 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3257 && zero_val == GET_MODE_PRECISION (imode)
3258 && INTVAL (trueop1) == exact_log2 (zero_val))
3259 return simplify_gen_relational (EQ, mode, imode,
3260 XEXP (op0, 0), const0_rtx);
3262 goto canonicalize_shift;
3264 case SMIN:
3265 if (width <= HOST_BITS_PER_WIDE_INT
3266 && mode_signbit_p (mode, trueop1)
3267 && ! side_effects_p (op0))
3268 return op1;
3269 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3270 return op0;
3271 tem = simplify_associative_operation (code, mode, op0, op1);
3272 if (tem)
3273 return tem;
3274 break;
3276 case SMAX:
3277 if (width <= HOST_BITS_PER_WIDE_INT
3278 && CONST_INT_P (trueop1)
3279 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3280 && ! side_effects_p (op0))
3281 return op1;
3282 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3283 return op0;
3284 tem = simplify_associative_operation (code, mode, op0, op1);
3285 if (tem)
3286 return tem;
3287 break;
3289 case UMIN:
3290 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3291 return op1;
3292 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3293 return op0;
3294 tem = simplify_associative_operation (code, mode, op0, op1);
3295 if (tem)
3296 return tem;
3297 break;
3299 case UMAX:
3300 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3301 return op1;
3302 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3303 return op0;
3304 tem = simplify_associative_operation (code, mode, op0, op1);
3305 if (tem)
3306 return tem;
3307 break;
3309 case SS_PLUS:
3310 case US_PLUS:
3311 case SS_MINUS:
3312 case US_MINUS:
3313 case SS_MULT:
3314 case US_MULT:
3315 case SS_DIV:
3316 case US_DIV:
3317 /* ??? There are simplifications that can be done. */
3318 return 0;
3320 case VEC_SELECT:
3321 if (!VECTOR_MODE_P (mode))
3323 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3324 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3325 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3326 gcc_assert (XVECLEN (trueop1, 0) == 1);
3327 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3329 if (GET_CODE (trueop0) == CONST_VECTOR)
3330 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3331 (trueop1, 0, 0)));
3333 /* Extract a scalar element from a nested VEC_SELECT expression
3334 (with an optional nested VEC_CONCAT expression). Some targets
3335 (i386) extract a scalar element from a vector using a chain of
3336 nested VEC_SELECT expressions. When the input operand is a memory
3337 operand, this operation can be simplified to a simple scalar
3338 load from the appropriately offset memory address. */
3339 if (GET_CODE (trueop0) == VEC_SELECT)
3341 rtx op0 = XEXP (trueop0, 0);
3342 rtx op1 = XEXP (trueop0, 1);
3344 machine_mode opmode = GET_MODE (op0);
3345 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3346 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3348 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3349 int elem;
3351 rtvec vec;
3352 rtx tmp_op, tmp;
3354 gcc_assert (GET_CODE (op1) == PARALLEL);
3355 gcc_assert (i < n_elts);
3357 /* Select the element pointed to by the nested selector. */
3358 elem = INTVAL (XVECEXP (op1, 0, i));
3360 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3361 if (GET_CODE (op0) == VEC_CONCAT)
3363 rtx op00 = XEXP (op0, 0);
3364 rtx op01 = XEXP (op0, 1);
3366 machine_mode mode00, mode01;
3367 int n_elts00, n_elts01;
3369 mode00 = GET_MODE (op00);
3370 mode01 = GET_MODE (op01);
3372 /* Find out the number of elements of each operand. */
3373 if (VECTOR_MODE_P (mode00))
3375 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3376 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3378 else
3379 n_elts00 = 1;
3381 if (VECTOR_MODE_P (mode01))
3383 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3384 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3386 else
3387 n_elts01 = 1;
3389 gcc_assert (n_elts == n_elts00 + n_elts01);
3391 /* Select correct operand of VEC_CONCAT
3392 and adjust selector. */
3393 if (elem < n_elts01)
3394 tmp_op = op00;
3395 else
3397 tmp_op = op01;
3398 elem -= n_elts00;
3401 else
3402 tmp_op = op0;
3404 vec = rtvec_alloc (1);
3405 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3407 tmp = gen_rtx_fmt_ee (code, mode,
3408 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3409 return tmp;
3411 if (GET_CODE (trueop0) == VEC_DUPLICATE
3412 && GET_MODE (XEXP (trueop0, 0)) == mode)
3413 return XEXP (trueop0, 0);
3415 else
3417 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3418 gcc_assert (GET_MODE_INNER (mode)
3419 == GET_MODE_INNER (GET_MODE (trueop0)));
3420 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3422 if (GET_CODE (trueop0) == CONST_VECTOR)
3424 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3425 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3426 rtvec v = rtvec_alloc (n_elts);
3427 unsigned int i;
3429 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3430 for (i = 0; i < n_elts; i++)
3432 rtx x = XVECEXP (trueop1, 0, i);
3434 gcc_assert (CONST_INT_P (x));
3435 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3436 INTVAL (x));
3439 return gen_rtx_CONST_VECTOR (mode, v);
3442 /* Recognize the identity. */
3443 if (GET_MODE (trueop0) == mode)
3445 bool maybe_ident = true;
3446 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3448 rtx j = XVECEXP (trueop1, 0, i);
3449 if (!CONST_INT_P (j) || INTVAL (j) != i)
3451 maybe_ident = false;
3452 break;
3455 if (maybe_ident)
3456 return trueop0;
3459 /* If we build {a,b} then permute it, build the result directly. */
3460 if (XVECLEN (trueop1, 0) == 2
3461 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3462 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3463 && GET_CODE (trueop0) == VEC_CONCAT
3464 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3465 && GET_MODE (XEXP (trueop0, 0)) == mode
3466 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3467 && GET_MODE (XEXP (trueop0, 1)) == mode)
3469 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3470 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3471 rtx subop0, subop1;
3473 gcc_assert (i0 < 4 && i1 < 4);
3474 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3475 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3477 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3480 if (XVECLEN (trueop1, 0) == 2
3481 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3482 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3483 && GET_CODE (trueop0) == VEC_CONCAT
3484 && GET_MODE (trueop0) == mode)
3486 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3487 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3488 rtx subop0, subop1;
3490 gcc_assert (i0 < 2 && i1 < 2);
3491 subop0 = XEXP (trueop0, i0);
3492 subop1 = XEXP (trueop0, i1);
3494 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3497 /* If we select one half of a vec_concat, return that. */
3498 if (GET_CODE (trueop0) == VEC_CONCAT
3499 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3501 rtx subop0 = XEXP (trueop0, 0);
3502 rtx subop1 = XEXP (trueop0, 1);
3503 machine_mode mode0 = GET_MODE (subop0);
3504 machine_mode mode1 = GET_MODE (subop1);
3505 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3506 int l0 = GET_MODE_SIZE (mode0) / li;
3507 int l1 = GET_MODE_SIZE (mode1) / li;
3508 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3509 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3511 bool success = true;
3512 for (int i = 1; i < l0; ++i)
3514 rtx j = XVECEXP (trueop1, 0, i);
3515 if (!CONST_INT_P (j) || INTVAL (j) != i)
3517 success = false;
3518 break;
3521 if (success)
3522 return subop0;
3524 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3526 bool success = true;
3527 for (int i = 1; i < l1; ++i)
3529 rtx j = XVECEXP (trueop1, 0, i);
3530 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3532 success = false;
3533 break;
3536 if (success)
3537 return subop1;
3542 if (XVECLEN (trueop1, 0) == 1
3543 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3544 && GET_CODE (trueop0) == VEC_CONCAT)
3546 rtx vec = trueop0;
3547 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3549 /* Try to find the element in the VEC_CONCAT. */
3550 while (GET_MODE (vec) != mode
3551 && GET_CODE (vec) == VEC_CONCAT)
3553 HOST_WIDE_INT vec_size;
3555 if (CONST_INT_P (XEXP (vec, 0)))
3557 /* vec_concat of two const_ints doesn't make sense with
3558 respect to modes. */
3559 if (CONST_INT_P (XEXP (vec, 1)))
3560 return 0;
3562 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3563 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3565 else
3566 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3568 if (offset < vec_size)
3569 vec = XEXP (vec, 0);
3570 else
3572 offset -= vec_size;
3573 vec = XEXP (vec, 1);
3575 vec = avoid_constant_pool_reference (vec);
3578 if (GET_MODE (vec) == mode)
3579 return vec;
3582 /* If we select elements in a vec_merge that all come from the same
3583 operand, select from that operand directly. */
3584 if (GET_CODE (op0) == VEC_MERGE)
3586 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3587 if (CONST_INT_P (trueop02))
3589 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3590 bool all_operand0 = true;
3591 bool all_operand1 = true;
3592 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3594 rtx j = XVECEXP (trueop1, 0, i);
3595 if (sel & (1 << UINTVAL (j)))
3596 all_operand1 = false;
3597 else
3598 all_operand0 = false;
3600 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3601 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3602 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3603 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3607 /* If we have two nested selects that are inverses of each
3608 other, replace them with the source operand. */
3609 if (GET_CODE (trueop0) == VEC_SELECT
3610 && GET_MODE (XEXP (trueop0, 0)) == mode)
3612 rtx op0_subop1 = XEXP (trueop0, 1);
3613 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3614 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3616 /* Apply the outer ordering vector to the inner one. (The inner
3617 ordering vector is expressly permitted to be of a different
3618 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3619 then the two VEC_SELECTs cancel. */
3620 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3622 rtx x = XVECEXP (trueop1, 0, i);
3623 if (!CONST_INT_P (x))
3624 return 0;
3625 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3626 if (!CONST_INT_P (y) || i != INTVAL (y))
3627 return 0;
3629 return XEXP (trueop0, 0);
3632 return 0;
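      /* Illustrative worked example (not part of the original file) for the
	 constant-vector extraction near the top of this case:
	   (vec_select:SI (const_vector:V4SI [(const_int 10) (const_int 20)
					      (const_int 30) (const_int 40)])
			  (parallel [(const_int 2)]))
	     -> (const_int 30)
	 i.e. selecting a single element of a constant vector folds to that
	 element directly via CONST_VECTOR_ELT.  */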
3633 case VEC_CONCAT:
3635 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3636 ? GET_MODE (trueop0)
3637 : GET_MODE_INNER (mode));
3638 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3639 ? GET_MODE (trueop1)
3640 : GET_MODE_INNER (mode));
3642 gcc_assert (VECTOR_MODE_P (mode));
3643 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3644 == GET_MODE_SIZE (mode));
3646 if (VECTOR_MODE_P (op0_mode))
3647 gcc_assert (GET_MODE_INNER (mode)
3648 == GET_MODE_INNER (op0_mode));
3649 else
3650 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3652 if (VECTOR_MODE_P (op1_mode))
3653 gcc_assert (GET_MODE_INNER (mode)
3654 == GET_MODE_INNER (op1_mode));
3655 else
3656 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3658 if ((GET_CODE (trueop0) == CONST_VECTOR
3659 || CONST_SCALAR_INT_P (trueop0)
3660 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3661 && (GET_CODE (trueop1) == CONST_VECTOR
3662 || CONST_SCALAR_INT_P (trueop1)
3663 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3665 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3666 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3667 rtvec v = rtvec_alloc (n_elts);
3668 unsigned int i;
3669 unsigned in_n_elts = 1;
3671 if (VECTOR_MODE_P (op0_mode))
3672 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3673 for (i = 0; i < n_elts; i++)
3675 if (i < in_n_elts)
3677 if (!VECTOR_MODE_P (op0_mode))
3678 RTVEC_ELT (v, i) = trueop0;
3679 else
3680 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3682 else
3684 if (!VECTOR_MODE_P (op1_mode))
3685 RTVEC_ELT (v, i) = trueop1;
3686 else
3687 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3688 i - in_n_elts);
3692 return gen_rtx_CONST_VECTOR (mode, v);
3695 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3696 Restrict the transformation to avoid generating a VEC_SELECT with a
3697 mode unrelated to its operand. */
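/* For example, (vec_concat (vec_select x [0 1]) (vec_select x [2 3])),
   where x already has the result mode, becomes (vec_select x [0 1 2 3]).  */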
3698 if (GET_CODE (trueop0) == VEC_SELECT
3699 && GET_CODE (trueop1) == VEC_SELECT
3700 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3701 && GET_MODE (XEXP (trueop0, 0)) == mode)
3703 rtx par0 = XEXP (trueop0, 1);
3704 rtx par1 = XEXP (trueop1, 1);
3705 int len0 = XVECLEN (par0, 0);
3706 int len1 = XVECLEN (par1, 0);
3707 rtvec vec = rtvec_alloc (len0 + len1);
3708 for (int i = 0; i < len0; i++)
3709 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3710 for (int i = 0; i < len1; i++)
3711 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3712 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3713 gen_rtx_PARALLEL (VOIDmode, vec));
3716 return 0;
3718 default:
3719 gcc_unreachable ();
3722 return 0;
3726 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3727 rtx op0, rtx op1)
3729 unsigned int width = GET_MODE_PRECISION (mode);
3731 if (VECTOR_MODE_P (mode)
3732 && code != VEC_CONCAT
3733 && GET_CODE (op0) == CONST_VECTOR
3734 && GET_CODE (op1) == CONST_VECTOR)
3736 unsigned n_elts = GET_MODE_NUNITS (mode);
3737 machine_mode op0mode = GET_MODE (op0);
3738 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3739 machine_mode op1mode = GET_MODE (op1);
3740 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3741 rtvec v = rtvec_alloc (n_elts);
3742 unsigned int i;
3744 gcc_assert (op0_n_elts == n_elts);
3745 gcc_assert (op1_n_elts == n_elts);
3746 for (i = 0; i < n_elts; i++)
3748 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3749 CONST_VECTOR_ELT (op0, i),
3750 CONST_VECTOR_ELT (op1, i));
3751 if (!x)
3752 return 0;
3753 RTVEC_ELT (v, i) = x;
3756 return gen_rtx_CONST_VECTOR (mode, v);
3759 if (VECTOR_MODE_P (mode)
3760 && code == VEC_CONCAT
3761 && (CONST_SCALAR_INT_P (op0)
3762 || GET_CODE (op0) == CONST_FIXED
3763 || CONST_DOUBLE_AS_FLOAT_P (op0))
3764 && (CONST_SCALAR_INT_P (op1)
3765 || CONST_DOUBLE_AS_FLOAT_P (op1)
3766 || GET_CODE (op1) == CONST_FIXED))
3768 unsigned n_elts = GET_MODE_NUNITS (mode);
3769 rtvec v = rtvec_alloc (n_elts);
3771 gcc_assert (n_elts >= 2);
3772 if (n_elts == 2)
3774 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3775 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3777 RTVEC_ELT (v, 0) = op0;
3778 RTVEC_ELT (v, 1) = op1;
3780 else
3782 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3783 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3784 unsigned i;
3786 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3787 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3788 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3790 for (i = 0; i < op0_n_elts; ++i)
3791 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3792 for (i = 0; i < op1_n_elts; ++i)
3793 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3796 return gen_rtx_CONST_VECTOR (mode, v);
3799 if (SCALAR_FLOAT_MODE_P (mode)
3800 && CONST_DOUBLE_AS_FLOAT_P (op0)
3801 && CONST_DOUBLE_AS_FLOAT_P (op1)
3802 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3804 if (code == AND
3805 || code == IOR
3806 || code == XOR)
3808 long tmp0[4];
3809 long tmp1[4];
3810 REAL_VALUE_TYPE r;
3811 int i;
3813 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3814 GET_MODE (op0));
3815 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3816 GET_MODE (op1));
3817 for (i = 0; i < 4; i++)
3819 switch (code)
3821 case AND:
3822 tmp0[i] &= tmp1[i];
3823 break;
3824 case IOR:
3825 tmp0[i] |= tmp1[i];
3826 break;
3827 case XOR:
3828 tmp0[i] ^= tmp1[i];
3829 break;
3830 default:
3831 gcc_unreachable ();
3834 real_from_target (&r, tmp0, mode);
3835 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3837 else
3839 REAL_VALUE_TYPE f0, f1, value, result;
3840 bool inexact;
3842 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3843 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3844 real_convert (&f0, mode, &f0);
3845 real_convert (&f1, mode, &f1);
3847 if (HONOR_SNANS (mode)
3848 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3849 return 0;
3851 if (code == DIV
3852 && REAL_VALUES_EQUAL (f1, dconst0)
3853 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3854 return 0;
3856 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3857 && flag_trapping_math
3858 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3860 int s0 = REAL_VALUE_NEGATIVE (f0);
3861 int s1 = REAL_VALUE_NEGATIVE (f1);
3863 switch (code)
3865 case PLUS:
3866 /* Inf + -Inf = NaN plus exception. */
3867 if (s0 != s1)
3868 return 0;
3869 break;
3870 case MINUS:
3871 /* Inf - Inf = NaN plus exception. */
3872 if (s0 == s1)
3873 return 0;
3874 break;
3875 case DIV:
3876 /* Inf / Inf = NaN plus exception. */
3877 return 0;
3878 default:
3879 break;
3883 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3884 && flag_trapping_math
3885 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3886 || (REAL_VALUE_ISINF (f1)
3887 && REAL_VALUES_EQUAL (f0, dconst0))))
3888 /* Inf * 0 = NaN plus exception. */
3889 return 0;
3891 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3892 &f0, &f1);
3893 real_convert (&result, mode, &value);
3895 /* Don't constant fold this floating point operation if
3896 the result has overflowed and flag_trapping_math is set. */
3898 if (flag_trapping_math
3899 && MODE_HAS_INFINITIES (mode)
3900 && REAL_VALUE_ISINF (result)
3901 && !REAL_VALUE_ISINF (f0)
3902 && !REAL_VALUE_ISINF (f1))
3903 /* Overflow plus exception. */
3904 return 0;
3906 /* Don't constant fold this floating point operation if the
3907 result may depend upon the run-time rounding mode and
3908 flag_rounding_math is set, or if GCC's software emulation
3909 is unable to accurately represent the result. */
3911 if ((flag_rounding_math
3912 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3913 && (inexact || !real_identical (&result, &value)))
3914 return NULL_RTX;
3916 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3920 /* We can fold some multi-word operations. */
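/* For example, on a 64-bit host adding two TImode constants, each of
   which spans two HOST_WIDE_INTs, is folded here through wi::add on
   the (rtx, mode) pairs built below.  */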
3921 if ((GET_MODE_CLASS (mode) == MODE_INT
3922 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3923 && CONST_SCALAR_INT_P (op0)
3924 && CONST_SCALAR_INT_P (op1))
3926 wide_int result;
3927 bool overflow;
3928 rtx_mode_t pop0 = std::make_pair (op0, mode);
3929 rtx_mode_t pop1 = std::make_pair (op1, mode);
3931 #if TARGET_SUPPORTS_WIDE_INT == 0
3932 /* This assert keeps the simplification from producing a result
3933 that cannot be represented in a CONST_DOUBLE, but a lot of
3934 upstream callers expect that this function never fails to
3935 simplify something, and so, if you added this to the test
3936 above, the code would die later anyway. If this assert
3937 happens, you just need to make the port support wide int. */
3938 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3939 #endif
3940 switch (code)
3942 case MINUS:
3943 result = wi::sub (pop0, pop1);
3944 break;
3946 case PLUS:
3947 result = wi::add (pop0, pop1);
3948 break;
3950 case MULT:
3951 result = wi::mul (pop0, pop1);
3952 break;
3954 case DIV:
3955 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3956 if (overflow)
3957 return NULL_RTX;
3958 break;
3960 case MOD:
3961 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3962 if (overflow)
3963 return NULL_RTX;
3964 break;
3966 case UDIV:
3967 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3968 if (overflow)
3969 return NULL_RTX;
3970 break;
3972 case UMOD:
3973 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3974 if (overflow)
3975 return NULL_RTX;
3976 break;
3978 case AND:
3979 result = wi::bit_and (pop0, pop1);
3980 break;
3982 case IOR:
3983 result = wi::bit_or (pop0, pop1);
3984 break;
3986 case XOR:
3987 result = wi::bit_xor (pop0, pop1);
3988 break;
3990 case SMIN:
3991 result = wi::smin (pop0, pop1);
3992 break;
3994 case SMAX:
3995 result = wi::smax (pop0, pop1);
3996 break;
3998 case UMIN:
3999 result = wi::umin (pop0, pop1);
4000 break;
4002 case UMAX:
4003 result = wi::umax (pop0, pop1);
4004 break;
4006 case LSHIFTRT:
4007 case ASHIFTRT:
4008 case ASHIFT:
4010 wide_int wop1 = pop1;
4011 if (SHIFT_COUNT_TRUNCATED)
4012 wop1 = wi::umod_trunc (wop1, width);
4013 else if (wi::geu_p (wop1, width))
4014 return NULL_RTX;
4016 switch (code)
4018 case LSHIFTRT:
4019 result = wi::lrshift (pop0, wop1);
4020 break;
4022 case ASHIFTRT:
4023 result = wi::arshift (pop0, wop1);
4024 break;
4026 case ASHIFT:
4027 result = wi::lshift (pop0, wop1);
4028 break;
4030 default:
4031 gcc_unreachable ();
4033 break;
4035 case ROTATE:
4036 case ROTATERT:
4038 if (wi::neg_p (pop1))
4039 return NULL_RTX;
4041 switch (code)
4043 case ROTATE:
4044 result = wi::lrotate (pop0, pop1);
4045 break;
4047 case ROTATERT:
4048 result = wi::rrotate (pop0, pop1);
4049 break;
4051 default:
4052 gcc_unreachable ();
4054 break;
4056 default:
4057 return NULL_RTX;
4059 return immed_wide_int_const (result, mode);
4062 return NULL_RTX;
4067 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4068 PLUS or MINUS.
4070 Rather than test for specific cases, we do this by a brute-force method
4071 and do all possible simplifications until no more changes occur. Then
4072 we rebuild the operation. */
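/* For example, (minus (plus a b) (plus a c)) is flattened into the
   operand list { a, b, -a, -c }; a and -a cancel in the pairwise loop
   below, and the remaining operands are rebuilt as (minus b c).  */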
4074 struct simplify_plus_minus_op_data
4076 rtx op;
4077 short neg;
4080 static bool
4081 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4083 int result;
4085 result = (commutative_operand_precedence (y)
4086 - commutative_operand_precedence (x));
4087 if (result)
4088 return result > 0;
4090 /* Group together equal REGs to do more simplification. */
4091 if (REG_P (x) && REG_P (y))
4092 return REGNO (x) > REGNO (y);
4093 else
4094 return false;
4097 static rtx
4098 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4099 rtx op1)
4101 struct simplify_plus_minus_op_data ops[16];
4102 rtx result, tem;
4103 int n_ops = 2;
4104 int changed, n_constants, canonicalized = 0;
4105 int i, j;
4107 memset (ops, 0, sizeof ops);
4109 /* Set up the two operands and then expand them until nothing has been
4110 changed. If we run out of room in our array, give up; this should
4111 almost never happen. */
4113 ops[0].op = op0;
4114 ops[0].neg = 0;
4115 ops[1].op = op1;
4116 ops[1].neg = (code == MINUS);
4120 changed = 0;
4121 n_constants = 0;
4123 for (i = 0; i < n_ops; i++)
4125 rtx this_op = ops[i].op;
4126 int this_neg = ops[i].neg;
4127 enum rtx_code this_code = GET_CODE (this_op);
4129 switch (this_code)
4131 case PLUS:
4132 case MINUS:
4133 if (n_ops == ARRAY_SIZE (ops))
4134 return NULL_RTX;
4136 ops[n_ops].op = XEXP (this_op, 1);
4137 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4138 n_ops++;
4140 ops[i].op = XEXP (this_op, 0);
4141 changed = 1;
4142 canonicalized |= this_neg || i != n_ops - 2;
4143 break;
4145 case NEG:
4146 ops[i].op = XEXP (this_op, 0);
4147 ops[i].neg = ! this_neg;
4148 changed = 1;
4149 canonicalized = 1;
4150 break;
4152 case CONST:
4153 if (n_ops != ARRAY_SIZE (ops)
4154 && GET_CODE (XEXP (this_op, 0)) == PLUS
4155 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4156 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4158 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4159 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4160 ops[n_ops].neg = this_neg;
4161 n_ops++;
4162 changed = 1;
4163 canonicalized = 1;
4165 break;
4167 case NOT:
4168 /* ~a -> (-a - 1) */
4169 if (n_ops != ARRAY_SIZE (ops))
4171 ops[n_ops].op = CONSTM1_RTX (mode);
4172 ops[n_ops++].neg = this_neg;
4173 ops[i].op = XEXP (this_op, 0);
4174 ops[i].neg = !this_neg;
4175 changed = 1;
4176 canonicalized = 1;
4178 break;
4180 case CONST_INT:
4181 n_constants++;
4182 if (this_neg)
4184 ops[i].op = neg_const_int (mode, this_op);
4185 ops[i].neg = 0;
4186 changed = 1;
4187 canonicalized = 1;
4189 break;
4191 default:
4192 break;
4196 while (changed);
4198 if (n_constants > 1)
4199 canonicalized = 1;
4201 gcc_assert (n_ops >= 2);
4203 /* If we only have two operands, we can avoid the loops. */
4204 if (n_ops == 2)
4206 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4207 rtx lhs, rhs;
4209 /* Get the two operands. Be careful with the order, especially for
4210 the cases where code == MINUS. */
4211 if (ops[0].neg && ops[1].neg)
4213 lhs = gen_rtx_NEG (mode, ops[0].op);
4214 rhs = ops[1].op;
4216 else if (ops[0].neg)
4218 lhs = ops[1].op;
4219 rhs = ops[0].op;
4221 else
4223 lhs = ops[0].op;
4224 rhs = ops[1].op;
4227 return simplify_const_binary_operation (code, mode, lhs, rhs);
4230 /* Now simplify each pair of operands until nothing changes. */
4233 /* Insertion sort is good enough for a small array. */
4234 for (i = 1; i < n_ops; i++)
4236 struct simplify_plus_minus_op_data save;
4237 j = i - 1;
4238 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4239 continue;
4241 canonicalized = 1;
4242 save = ops[i];
4244 ops[j + 1] = ops[j];
4245 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4246 ops[j + 1] = save;
4249 changed = 0;
4250 for (i = n_ops - 1; i > 0; i--)
4251 for (j = i - 1; j >= 0; j--)
4253 rtx lhs = ops[j].op, rhs = ops[i].op;
4254 int lneg = ops[j].neg, rneg = ops[i].neg;
4256 if (lhs != 0 && rhs != 0)
4258 enum rtx_code ncode = PLUS;
4260 if (lneg != rneg)
4262 ncode = MINUS;
4263 if (lneg)
4264 std::swap (lhs, rhs);
4266 else if (swap_commutative_operands_p (lhs, rhs))
4267 std::swap (lhs, rhs);
4269 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4270 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4272 rtx tem_lhs, tem_rhs;
4274 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4275 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4276 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4278 if (tem && !CONSTANT_P (tem))
4279 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4281 else
4282 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4284 if (tem)
4286 /* Reject "simplifications" that just wrap the two
4287 arguments in a CONST. Failure to do so can result
4288 in infinite recursion with simplify_binary_operation
4289 when it calls us to simplify CONST operations.
4290 Also, if we find such a simplification, don't try
4291 any more combinations with this rhs: We must have
4292 something like symbol+offset, i.e. one of the
4293 trivial CONST expressions we handle later. */
4294 if (GET_CODE (tem) == CONST
4295 && GET_CODE (XEXP (tem, 0)) == ncode
4296 && XEXP (XEXP (tem, 0), 0) == lhs
4297 && XEXP (XEXP (tem, 0), 1) == rhs)
4298 break;
4299 lneg &= rneg;
4300 if (GET_CODE (tem) == NEG)
4301 tem = XEXP (tem, 0), lneg = !lneg;
4302 if (CONST_INT_P (tem) && lneg)
4303 tem = neg_const_int (mode, tem), lneg = 0;
4305 ops[i].op = tem;
4306 ops[i].neg = lneg;
4307 ops[j].op = NULL_RTX;
4308 changed = 1;
4309 canonicalized = 1;
4314 /* If nothing changed, fail. */
4315 if (!canonicalized)
4316 return NULL_RTX;
4318 /* Pack all the operands to the lower-numbered entries. */
4319 for (i = 0, j = 0; j < n_ops; j++)
4320 if (ops[j].op)
4322 ops[i] = ops[j];
4323 i++;
4325 n_ops = i;
4327 while (changed);
4329 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4330 if (n_ops == 2
4331 && CONST_INT_P (ops[1].op)
4332 && CONSTANT_P (ops[0].op)
4333 && ops[0].neg)
4334 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4336 /* We suppressed creation of trivial CONST expressions in the
4337 combination loop to avoid recursion. Create one manually now.
4338 The combination loop should have ensured that there is exactly
4339 one CONST_INT, and the sort will have ensured that it is last
4340 in the array and that any other constant will be next-to-last. */
4342 if (n_ops > 1
4343 && CONST_INT_P (ops[n_ops - 1].op)
4344 && CONSTANT_P (ops[n_ops - 2].op))
4346 rtx value = ops[n_ops - 1].op;
4347 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4348 value = neg_const_int (mode, value);
4349 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4350 INTVAL (value));
4351 n_ops--;
4354 /* Put a non-negated operand first, if possible. */
4356 for (i = 0; i < n_ops && ops[i].neg; i++)
4357 continue;
4358 if (i == n_ops)
4359 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4360 else if (i != 0)
4362 tem = ops[0].op;
4363 ops[0] = ops[i];
4364 ops[i].op = tem;
4365 ops[i].neg = 1;
4368 /* Now make the result by performing the requested operations. */
4369 result = ops[0].op;
4370 for (i = 1; i < n_ops; i++)
4371 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4372 mode, result, ops[i].op);
4374 return result;
4377 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4378 static bool
4379 plus_minus_operand_p (const_rtx x)
4381 return GET_CODE (x) == PLUS
4382 || GET_CODE (x) == MINUS
4383 || (GET_CODE (x) == CONST
4384 && GET_CODE (XEXP (x, 0)) == PLUS
4385 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4386 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4389 /* Like simplify_binary_operation except used for relational operators.
4390 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4391 not both be VOIDmode.
4393 CMP_MODE specifies the mode in which the comparison is done, so it is
4394 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4395 the operands or, if both are VOIDmode, the operands are compared in
4396 "infinite precision". */
4398 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4399 machine_mode cmp_mode, rtx op0, rtx op1)
4401 rtx tem, trueop0, trueop1;
4403 if (cmp_mode == VOIDmode)
4404 cmp_mode = GET_MODE (op0);
4405 if (cmp_mode == VOIDmode)
4406 cmp_mode = GET_MODE (op1);
4408 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4409 if (tem)
4411 if (SCALAR_FLOAT_MODE_P (mode))
4413 if (tem == const0_rtx)
4414 return CONST0_RTX (mode);
4415 #ifdef FLOAT_STORE_FLAG_VALUE
4417 REAL_VALUE_TYPE val;
4418 val = FLOAT_STORE_FLAG_VALUE (mode);
4419 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4421 #else
4422 return NULL_RTX;
4423 #endif
4425 if (VECTOR_MODE_P (mode))
4427 if (tem == const0_rtx)
4428 return CONST0_RTX (mode);
4429 #ifdef VECTOR_STORE_FLAG_VALUE
4431 int i, units;
4432 rtvec v;
4434 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4435 if (val == NULL_RTX)
4436 return NULL_RTX;
4437 if (val == const1_rtx)
4438 return CONST1_RTX (mode);
4440 units = GET_MODE_NUNITS (mode);
4441 v = rtvec_alloc (units);
4442 for (i = 0; i < units; i++)
4443 RTVEC_ELT (v, i) = val;
4444 return gen_rtx_raw_CONST_VECTOR (mode, v);
4446 #else
4447 return NULL_RTX;
4448 #endif
4451 return tem;
4454 /* For the following tests, ensure const0_rtx is op1. */
4455 if (swap_commutative_operands_p (op0, op1)
4456 || (op0 == const0_rtx && op1 != const0_rtx))
4457 std::swap (op0, op1), code = swap_condition (code);
4459 /* If op0 is a compare, extract the comparison arguments from it. */
4460 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4461 return simplify_gen_relational (code, mode, VOIDmode,
4462 XEXP (op0, 0), XEXP (op0, 1));
4464 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4465 || CC0_P (op0))
4466 return NULL_RTX;
4468 trueop0 = avoid_constant_pool_reference (op0);
4469 trueop1 = avoid_constant_pool_reference (op1);
4470 return simplify_relational_operation_1 (code, mode, cmp_mode,
4471 trueop0, trueop1);
4474 /* This part of simplify_relational_operation is only used when CMP_MODE
4475 is not in class MODE_CC (i.e. it is a real comparison).
4477 MODE is the mode of the result, while CMP_MODE specifies the mode
4478 in which the comparison is done, so it is the mode of the operands. */
4480 static rtx
4481 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4482 machine_mode cmp_mode, rtx op0, rtx op1)
4484 enum rtx_code op0code = GET_CODE (op0);
4486 if (op1 == const0_rtx && COMPARISON_P (op0))
4488 /* If op0 is a comparison, extract the comparison arguments
4489 from it. */
4490 if (code == NE)
4492 if (GET_MODE (op0) == mode)
4493 return simplify_rtx (op0);
4494 else
4495 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4496 XEXP (op0, 0), XEXP (op0, 1));
4498 else if (code == EQ)
4500 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4501 if (new_code != UNKNOWN)
4502 return simplify_gen_relational (new_code, mode, VOIDmode,
4503 XEXP (op0, 0), XEXP (op0, 1));
4507 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4508 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
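/* For example, (ltu (plus a 3) 3), which tests whether the addition
   wrapped around, becomes (geu a -3).  */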
4509 if ((code == LTU || code == GEU)
4510 && GET_CODE (op0) == PLUS
4511 && CONST_INT_P (XEXP (op0, 1))
4512 && (rtx_equal_p (op1, XEXP (op0, 0))
4513 || rtx_equal_p (op1, XEXP (op0, 1)))
4514 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4515 && XEXP (op0, 1) != const0_rtx)
4517 rtx new_cmp
4518 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4519 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4520 cmp_mode, XEXP (op0, 0), new_cmp);
4523 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4524 if ((code == LTU || code == GEU)
4525 && GET_CODE (op0) == PLUS
4526 && rtx_equal_p (op1, XEXP (op0, 1))
4527 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4528 && !rtx_equal_p (op1, XEXP (op0, 0)))
4529 return simplify_gen_relational (code, mode, cmp_mode, op0,
4530 copy_rtx (XEXP (op0, 0)));
4532 if (op1 == const0_rtx)
4534 /* Canonicalize (GTU x 0) as (NE x 0). */
4535 if (code == GTU)
4536 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4537 /* Canonicalize (LEU x 0) as (EQ x 0). */
4538 if (code == LEU)
4539 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4541 else if (op1 == const1_rtx)
4543 switch (code)
4545 case GE:
4546 /* Canonicalize (GE x 1) as (GT x 0). */
4547 return simplify_gen_relational (GT, mode, cmp_mode,
4548 op0, const0_rtx);
4549 case GEU:
4550 /* Canonicalize (GEU x 1) as (NE x 0). */
4551 return simplify_gen_relational (NE, mode, cmp_mode,
4552 op0, const0_rtx);
4553 case LT:
4554 /* Canonicalize (LT x 1) as (LE x 0). */
4555 return simplify_gen_relational (LE, mode, cmp_mode,
4556 op0, const0_rtx);
4557 case LTU:
4558 /* Canonicalize (LTU x 1) as (EQ x 0). */
4559 return simplify_gen_relational (EQ, mode, cmp_mode,
4560 op0, const0_rtx);
4561 default:
4562 break;
4565 else if (op1 == constm1_rtx)
4567 /* Canonicalize (LE x -1) as (LT x 0). */
4568 if (code == LE)
4569 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4570 /* Canonicalize (GT x -1) as (GE x 0). */
4571 if (code == GT)
4572 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4575 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
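/* For example, (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)).  */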
4576 if ((code == EQ || code == NE)
4577 && (op0code == PLUS || op0code == MINUS)
4578 && CONSTANT_P (op1)
4579 && CONSTANT_P (XEXP (op0, 1))
4580 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4582 rtx x = XEXP (op0, 0);
4583 rtx c = XEXP (op0, 1);
4584 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4585 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4587 /* Detect an infinitely recursive condition, where this simplification
4588 would oscillate between:
4589 A + B == C <---> C - B == A,
4590 where A, B, and C are all constants with non-simplifiable expressions,
4591 usually SYMBOL_REFs.
4592 if (GET_CODE (tem) == invcode
4593 && CONSTANT_P (x)
4594 && rtx_equal_p (c, XEXP (tem, 1)))
4595 return NULL_RTX;
4597 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4600 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4601 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4602 if (code == NE
4603 && op1 == const0_rtx
4604 && GET_MODE_CLASS (mode) == MODE_INT
4605 && cmp_mode != VOIDmode
4606 /* ??? Work-around BImode bugs in the ia64 backend. */
4607 && mode != BImode
4608 && cmp_mode != BImode
4609 && nonzero_bits (op0, cmp_mode) == 1
4610 && STORE_FLAG_VALUE == 1)
4611 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4612 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4613 : lowpart_subreg (mode, op0, cmp_mode);
4615 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4616 if ((code == EQ || code == NE)
4617 && op1 == const0_rtx
4618 && op0code == XOR)
4619 return simplify_gen_relational (code, mode, cmp_mode,
4620 XEXP (op0, 0), XEXP (op0, 1));
4622 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4623 if ((code == EQ || code == NE)
4624 && op0code == XOR
4625 && rtx_equal_p (XEXP (op0, 0), op1)
4626 && !side_effects_p (XEXP (op0, 0)))
4627 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4628 CONST0_RTX (mode));
4630 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4631 if ((code == EQ || code == NE)
4632 && op0code == XOR
4633 && rtx_equal_p (XEXP (op0, 1), op1)
4634 && !side_effects_p (XEXP (op0, 1)))
4635 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4636 CONST0_RTX (mode));
4638 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4639 if ((code == EQ || code == NE)
4640 && op0code == XOR
4641 && CONST_SCALAR_INT_P (op1)
4642 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4643 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4644 simplify_gen_binary (XOR, cmp_mode,
4645 XEXP (op0, 1), op1));
4647 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4648 can be implemented with a BICS instruction on some targets, or
4649 constant-folded if y is a constant. */
4650 if ((code == EQ || code == NE)
4651 && op0code == AND
4652 && rtx_equal_p (XEXP (op0, 0), op1)
4653 && !side_effects_p (op1)
4654 && op1 != CONST0_RTX (cmp_mode))
4656 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4657 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4659 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4660 CONST0_RTX (cmp_mode));
4663 /* Likewise for (eq/ne (and x y) y). */
4664 if ((code == EQ || code == NE)
4665 && op0code == AND
4666 && rtx_equal_p (XEXP (op0, 1), op1)
4667 && !side_effects_p (op1)
4668 && op1 != CONST0_RTX (cmp_mode))
4670 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4671 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4673 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4674 CONST0_RTX (cmp_mode));
4677 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4678 if ((code == EQ || code == NE)
4679 && GET_CODE (op0) == BSWAP
4680 && CONST_SCALAR_INT_P (op1))
4681 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4682 simplify_gen_unary (BSWAP, cmp_mode,
4683 op1, cmp_mode));
4685 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4686 if ((code == EQ || code == NE)
4687 && GET_CODE (op0) == BSWAP
4688 && GET_CODE (op1) == BSWAP)
4689 return simplify_gen_relational (code, mode, cmp_mode,
4690 XEXP (op0, 0), XEXP (op1, 0));
4692 if (op0code == POPCOUNT && op1 == const0_rtx)
4693 switch (code)
4695 case EQ:
4696 case LE:
4697 case LEU:
4698 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4699 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4700 XEXP (op0, 0), const0_rtx);
4702 case NE:
4703 case GT:
4704 case GTU:
4705 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4706 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4707 XEXP (op0, 0), const0_rtx);
4709 default:
4710 break;
4713 return NULL_RTX;
4716 enum
4718 CMP_EQ = 1,
4719 CMP_LT = 2,
4720 CMP_GT = 4,
4721 CMP_LTU = 8,
4722 CMP_GTU = 16
4726 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4727 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4728 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4729 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4730 For floating-point comparisons, assume that the operands were ordered. */
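/* For example, for the integer constants -1 and 1 the caller passes
   CMP_LT | CMP_GTU: -1 is smaller as a signed value but larger as an
   unsigned one, so LT folds to const_true_rtx while LTU folds to
   const0_rtx.  */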
4732 static rtx
4733 comparison_result (enum rtx_code code, int known_results)
4735 switch (code)
4737 case EQ:
4738 case UNEQ:
4739 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4740 case NE:
4741 case LTGT:
4742 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4744 case LT:
4745 case UNLT:
4746 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4747 case GE:
4748 case UNGE:
4749 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4751 case GT:
4752 case UNGT:
4753 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4754 case LE:
4755 case UNLE:
4756 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4758 case LTU:
4759 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4760 case GEU:
4761 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4763 case GTU:
4764 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4765 case LEU:
4766 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4768 case ORDERED:
4769 return const_true_rtx;
4770 case UNORDERED:
4771 return const0_rtx;
4772 default:
4773 gcc_unreachable ();
4777 /* Check if the given comparison (done in the given MODE) is actually
4778 a tautology or a contradiction. If the mode is VOIDmode, the
4779 comparison is done in "infinite precision". If no simplification
4780 is possible, this function returns zero. Otherwise, it returns
4781 either const_true_rtx or const0_rtx. */
4784 simplify_const_relational_operation (enum rtx_code code,
4785 machine_mode mode,
4786 rtx op0, rtx op1)
4788 rtx tem;
4789 rtx trueop0;
4790 rtx trueop1;
4792 gcc_assert (mode != VOIDmode
4793 || (GET_MODE (op0) == VOIDmode
4794 && GET_MODE (op1) == VOIDmode));
4796 /* If op0 is a compare, extract the comparison arguments from it. */
4797 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4799 op1 = XEXP (op0, 1);
4800 op0 = XEXP (op0, 0);
4802 if (GET_MODE (op0) != VOIDmode)
4803 mode = GET_MODE (op0);
4804 else if (GET_MODE (op1) != VOIDmode)
4805 mode = GET_MODE (op1);
4806 else
4807 return 0;
4810 /* We can't simplify MODE_CC values since we don't know what the
4811 actual comparison is. */
4812 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4813 return 0;
4815 /* Make sure the constant is second. */
4816 if (swap_commutative_operands_p (op0, op1))
4818 std::swap (op0, op1);
4819 code = swap_condition (code);
4822 trueop0 = avoid_constant_pool_reference (op0);
4823 trueop1 = avoid_constant_pool_reference (op1);
4825 /* For integer comparisons of A and B maybe we can simplify A - B and can
4826 then simplify a comparison of that with zero. If A and B are both either
4827 a register or a CONST_INT, this can't help; testing for these cases will
4828 prevent infinite recursion here and speed things up.
4830 We can only do this for EQ and NE comparisons; otherwise we may
4831 lose or introduce overflow, which we cannot disregard as undefined
4832 because we do not know the signedness of the operation on either the
4833 left or the right hand side of the comparison. */
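/* For example, comparing (plus x (const_int 4)) with x for EQ gives
   tem = (const_int 4), and (eq (const_int 4) (const_int 0)) then folds
   to const0_rtx.  */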
4835 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4836 && (code == EQ || code == NE)
4837 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4838 && (REG_P (op1) || CONST_INT_P (trueop1)))
4839 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4840 /* We cannot do this if tem is a nonzero address. */
4841 && ! nonzero_address_p (tem))
4842 return simplify_const_relational_operation (signed_condition (code),
4843 mode, tem, const0_rtx);
4845 if (! HONOR_NANS (mode) && code == ORDERED)
4846 return const_true_rtx;
4848 if (! HONOR_NANS (mode) && code == UNORDERED)
4849 return const0_rtx;
4851 /* For modes without NaNs, if the two operands are equal, we know the
4852 result except if they have side-effects. Even with NaNs we know
4853 the result of unordered comparisons and, if signaling NaNs are
4854 irrelevant, also the result of LT/GT/LTGT. */
4855 if ((! HONOR_NANS (trueop0)
4856 || code == UNEQ || code == UNLE || code == UNGE
4857 || ((code == LT || code == GT || code == LTGT)
4858 && ! HONOR_SNANS (trueop0)))
4859 && rtx_equal_p (trueop0, trueop1)
4860 && ! side_effects_p (trueop0))
4861 return comparison_result (code, CMP_EQ);
4863 /* If the operands are floating-point constants, see if we can fold
4864 the result. */
4865 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4866 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4867 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4869 REAL_VALUE_TYPE d0, d1;
4871 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4872 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4874 /* Comparisons are unordered iff at least one of the values is NaN. */
4875 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4876 switch (code)
4878 case UNEQ:
4879 case UNLT:
4880 case UNGT:
4881 case UNLE:
4882 case UNGE:
4883 case NE:
4884 case UNORDERED:
4885 return const_true_rtx;
4886 case EQ:
4887 case LT:
4888 case GT:
4889 case LE:
4890 case GE:
4891 case LTGT:
4892 case ORDERED:
4893 return const0_rtx;
4894 default:
4895 return 0;
4898 return comparison_result (code,
4899 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4900 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4903 /* Otherwise, see if the operands are both integers. */
4904 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4905 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4907 /* It would be nice if we really had a mode here. However, the
4908 largest int representable on the target is as good as
4909 infinite. */
4910 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4911 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4912 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4914 if (wi::eq_p (ptrueop0, ptrueop1))
4915 return comparison_result (code, CMP_EQ);
4916 else
4918 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4919 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4920 return comparison_result (code, cr);
4924 /* Optimize comparisons with upper and lower bounds. */
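/* For example, if nonzero_bits shows that op0 can only be in the range
   0..15, then (gtu op0 (const_int 20)) folds to const0_rtx and
   (leu op0 (const_int 15)) folds to const_true_rtx.  */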
4925 if (HWI_COMPUTABLE_MODE_P (mode)
4926 && CONST_INT_P (trueop1))
4928 int sign;
4929 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4930 HOST_WIDE_INT val = INTVAL (trueop1);
4931 HOST_WIDE_INT mmin, mmax;
4933 if (code == GEU
4934 || code == LEU
4935 || code == GTU
4936 || code == LTU)
4937 sign = 0;
4938 else
4939 sign = 1;
4941 /* Get a reduced range if the sign bit is zero. */
4942 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4944 mmin = 0;
4945 mmax = nonzero;
4947 else
4949 rtx mmin_rtx, mmax_rtx;
4950 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4952 mmin = INTVAL (mmin_rtx);
4953 mmax = INTVAL (mmax_rtx);
4954 if (sign)
4956 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4958 mmin >>= (sign_copies - 1);
4959 mmax >>= (sign_copies - 1);
4963 switch (code)
4965 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4966 case GEU:
4967 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4968 return const_true_rtx;
4969 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4970 return const0_rtx;
4971 break;
4972 case GE:
4973 if (val <= mmin)
4974 return const_true_rtx;
4975 if (val > mmax)
4976 return const0_rtx;
4977 break;
4979 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4980 case LEU:
4981 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4982 return const_true_rtx;
4983 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4984 return const0_rtx;
4985 break;
4986 case LE:
4987 if (val >= mmax)
4988 return const_true_rtx;
4989 if (val < mmin)
4990 return const0_rtx;
4991 break;
4993 case EQ:
4994 /* x == y is always false for y out of range. */
4995 if (val < mmin || val > mmax)
4996 return const0_rtx;
4997 break;
4999 /* x > y is always false for y >= mmax, always true for y < mmin. */
5000 case GTU:
5001 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5002 return const0_rtx;
5003 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5004 return const_true_rtx;
5005 break;
5006 case GT:
5007 if (val >= mmax)
5008 return const0_rtx;
5009 if (val < mmin)
5010 return const_true_rtx;
5011 break;
5013 /* x < y is always false for y <= mmin, always true for y > mmax. */
5014 case LTU:
5015 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5016 return const0_rtx;
5017 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5018 return const_true_rtx;
5019 break;
5020 case LT:
5021 if (val <= mmin)
5022 return const0_rtx;
5023 if (val > mmax)
5024 return const_true_rtx;
5025 break;
5027 case NE:
5028 /* x != y is always true for y out of range. */
5029 if (val < mmin || val > mmax)
5030 return const_true_rtx;
5031 break;
5033 default:
5034 break;
5038 /* Optimize integer comparisons with zero. */
5039 if (trueop1 == const0_rtx)
5041 /* Some addresses are known to be nonzero. We don't know
5042 their sign, but equality comparisons are known. */
5043 if (nonzero_address_p (trueop0))
5045 if (code == EQ || code == LEU)
5046 return const0_rtx;
5047 if (code == NE || code == GTU)
5048 return const_true_rtx;
5051 /* See if the first operand is an IOR with a constant. If so, we
5052 may be able to determine the result of this comparison. */
5053 if (GET_CODE (op0) == IOR)
5055 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5056 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5058 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5059 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5060 && (UINTVAL (inner_const)
5061 & ((unsigned HOST_WIDE_INT) 1
5062 << sign_bitnum)));
5064 switch (code)
5066 case EQ:
5067 case LEU:
5068 return const0_rtx;
5069 case NE:
5070 case GTU:
5071 return const_true_rtx;
5072 case LT:
5073 case LE:
5074 if (has_sign)
5075 return const_true_rtx;
5076 break;
5077 case GT:
5078 case GE:
5079 if (has_sign)
5080 return const0_rtx;
5081 break;
5082 default:
5083 break;
5089 /* Optimize comparison of ABS with zero. */
5090 if (trueop1 == CONST0_RTX (mode)
5091 && (GET_CODE (trueop0) == ABS
5092 || (GET_CODE (trueop0) == FLOAT_EXTEND
5093 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5095 switch (code)
5097 case LT:
5098 /* Optimize abs(x) < 0.0. */
5099 if (!HONOR_SNANS (mode)
5100 && (!INTEGRAL_MODE_P (mode)
5101 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5103 if (INTEGRAL_MODE_P (mode)
5104 && (issue_strict_overflow_warning
5105 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5106 warning (OPT_Wstrict_overflow,
5107 ("assuming signed overflow does not occur when "
5108 "assuming abs (x) < 0 is false"));
5109 return const0_rtx;
5111 break;
5113 case GE:
5114 /* Optimize abs(x) >= 0.0. */
5115 if (!HONOR_NANS (mode)
5116 && (!INTEGRAL_MODE_P (mode)
5117 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5119 if (INTEGRAL_MODE_P (mode)
5120 && (issue_strict_overflow_warning
5121 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5122 warning (OPT_Wstrict_overflow,
5123 ("assuming signed overflow does not occur when "
5124 "assuming abs (x) >= 0 is true"));
5125 return const_true_rtx;
5127 break;
5129 case UNGE:
5130 /* Optimize ! (abs(x) < 0.0). */
5131 return const_true_rtx;
5133 default:
5134 break;
5138 return 0;
5141 /* Simplify CODE, an operation with result mode MODE and three operands,
5142 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5143 a constant. Return 0 if no simplification is possible. */
5146 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5147 machine_mode op0_mode, rtx op0, rtx op1,
5148 rtx op2)
5150 unsigned int width = GET_MODE_PRECISION (mode);
5151 bool any_change = false;
5152 rtx tem, trueop2;
5154 /* VOIDmode means "infinite" precision. */
5155 if (width == 0)
5156 width = HOST_BITS_PER_WIDE_INT;
5158 switch (code)
5160 case FMA:
5161 /* Simplify negations around the multiplication. */
5162 /* -a * -b + c => a * b + c. */
5163 if (GET_CODE (op0) == NEG)
5165 tem = simplify_unary_operation (NEG, mode, op1, mode);
5166 if (tem)
5167 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5169 else if (GET_CODE (op1) == NEG)
5171 tem = simplify_unary_operation (NEG, mode, op0, mode);
5172 if (tem)
5173 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5176 /* Canonicalize the two multiplication operands. */
5177 /* a * -b + c => -b * a + c. */
5178 if (swap_commutative_operands_p (op0, op1))
5179 std::swap (op0, op1), any_change = true;
5181 if (any_change)
5182 return gen_rtx_FMA (mode, op0, op1, op2);
5183 return NULL_RTX;
5185 case SIGN_EXTRACT:
5186 case ZERO_EXTRACT:
5187 if (CONST_INT_P (op0)
5188 && CONST_INT_P (op1)
5189 && CONST_INT_P (op2)
5190 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5191 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5193 /* Extracting a bit-field from a constant */
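/* For example, on a !BITS_BIG_ENDIAN target,
   (zero_extract (const_int 0x5a) (const_int 4) (const_int 1)) shifts
   right by 1 and masks to 4 bits, giving (const_int 0xd); a
   sign_extract of the same field would sign-extend to (const_int -3).  */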
5194 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5195 HOST_WIDE_INT op1val = INTVAL (op1);
5196 HOST_WIDE_INT op2val = INTVAL (op2);
5197 if (BITS_BIG_ENDIAN)
5198 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5199 else
5200 val >>= op2val;
5202 if (HOST_BITS_PER_WIDE_INT != op1val)
5204 /* First zero-extend. */
5205 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5206 /* If desired, propagate sign bit. */
5207 if (code == SIGN_EXTRACT
5208 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5209 != 0)
5210 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5213 return gen_int_mode (val, mode);
5215 break;
5217 case IF_THEN_ELSE:
5218 if (CONST_INT_P (op0))
5219 return op0 != const0_rtx ? op1 : op2;
5221 /* Convert c ? a : a into "a". */
5222 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5223 return op1;
5225 /* Convert a != b ? a : b into "a". */
5226 if (GET_CODE (op0) == NE
5227 && ! side_effects_p (op0)
5228 && ! HONOR_NANS (mode)
5229 && ! HONOR_SIGNED_ZEROS (mode)
5230 && ((rtx_equal_p (XEXP (op0, 0), op1)
5231 && rtx_equal_p (XEXP (op0, 1), op2))
5232 || (rtx_equal_p (XEXP (op0, 0), op2)
5233 && rtx_equal_p (XEXP (op0, 1), op1))))
5234 return op1;
5236 /* Convert a == b ? a : b into "b". */
5237 if (GET_CODE (op0) == EQ
5238 && ! side_effects_p (op0)
5239 && ! HONOR_NANS (mode)
5240 && ! HONOR_SIGNED_ZEROS (mode)
5241 && ((rtx_equal_p (XEXP (op0, 0), op1)
5242 && rtx_equal_p (XEXP (op0, 1), op2))
5243 || (rtx_equal_p (XEXP (op0, 0), op2)
5244 && rtx_equal_p (XEXP (op0, 1), op1))))
5245 return op2;
5247 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5249 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5250 ? GET_MODE (XEXP (op0, 1))
5251 : GET_MODE (XEXP (op0, 0)));
5252 rtx temp;
5254 /* Look for happy constants in op1 and op2. */
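/* For example, with STORE_FLAG_VALUE == 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes (lt x y),
   while swapping the two arms uses the reversed comparison instead.  */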
5255 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5257 HOST_WIDE_INT t = INTVAL (op1);
5258 HOST_WIDE_INT f = INTVAL (op2);
5260 if (t == STORE_FLAG_VALUE && f == 0)
5261 code = GET_CODE (op0);
5262 else if (t == 0 && f == STORE_FLAG_VALUE)
5264 enum rtx_code tmp;
5265 tmp = reversed_comparison_code (op0, NULL_RTX);
5266 if (tmp == UNKNOWN)
5267 break;
5268 code = tmp;
5270 else
5271 break;
5273 return simplify_gen_relational (code, mode, cmp_mode,
5274 XEXP (op0, 0), XEXP (op0, 1));
5277 if (cmp_mode == VOIDmode)
5278 cmp_mode = op0_mode;
5279 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5280 cmp_mode, XEXP (op0, 0),
5281 XEXP (op0, 1));
5283 /* See if any simplifications were possible. */
5284 if (temp)
5286 if (CONST_INT_P (temp))
5287 return temp == const0_rtx ? op2 : op1;
5288 else if (temp)
5289 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5292 break;
5294 case VEC_MERGE:
5295 gcc_assert (GET_MODE (op0) == mode);
5296 gcc_assert (GET_MODE (op1) == mode);
5297 gcc_assert (VECTOR_MODE_P (mode));
5298 trueop2 = avoid_constant_pool_reference (op2);
5299 if (CONST_INT_P (trueop2))
5301 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5302 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5303 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5304 unsigned HOST_WIDE_INT mask;
5305 if (n_elts == HOST_BITS_PER_WIDE_INT)
5306 mask = -1;
5307 else
5308 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5310 if (!(sel & mask) && !side_effects_p (op0))
5311 return op1;
5312 if ((sel & mask) == mask && !side_effects_p (op1))
5313 return op0;
5315 rtx trueop0 = avoid_constant_pool_reference (op0);
5316 rtx trueop1 = avoid_constant_pool_reference (op1);
5317 if (GET_CODE (trueop0) == CONST_VECTOR
5318 && GET_CODE (trueop1) == CONST_VECTOR)
5320 rtvec v = rtvec_alloc (n_elts);
5321 unsigned int i;
5323 for (i = 0; i < n_elts; i++)
5324 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5325 ? CONST_VECTOR_ELT (trueop0, i)
5326 : CONST_VECTOR_ELT (trueop1, i));
5327 return gen_rtx_CONST_VECTOR (mode, v);
5330 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5331 if no element from a appears in the result. */
5332 if (GET_CODE (op0) == VEC_MERGE)
5334 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5335 if (CONST_INT_P (tem))
5337 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5338 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5339 return simplify_gen_ternary (code, mode, mode,
5340 XEXP (op0, 1), op1, op2);
5341 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5342 return simplify_gen_ternary (code, mode, mode,
5343 XEXP (op0, 0), op1, op2);
5346 if (GET_CODE (op1) == VEC_MERGE)
5348 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5349 if (CONST_INT_P (tem))
5351 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5352 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5353 return simplify_gen_ternary (code, mode, mode,
5354 op0, XEXP (op1, 1), op2);
5355 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5356 return simplify_gen_ternary (code, mode, mode,
5357 op0, XEXP (op1, 0), op2);
5361 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5362 with a. */
5363 if (GET_CODE (op0) == VEC_DUPLICATE
5364 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5365 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5366 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5368 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5369 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5371 if (XEXP (XEXP (op0, 0), 0) == op1
5372 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5373 return op1;
5378 if (rtx_equal_p (op0, op1)
5379 && !side_effects_p (op2) && !side_effects_p (op1))
5380 return op0;
5382 break;
5384 default:
5385 gcc_unreachable ();
5388 return 0;
5391 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5392 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5393 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5395 Works by unpacking OP into a collection of 8-bit values
5396 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5397 and then repacking them again for OUTERMODE. */
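/* For example, on a little-endian target SImode (const_int 0x12345678)
   unpacks into the bytes { 0x78, 0x56, 0x34, 0x12 }; a HImode subreg at
   byte 0 then repacks the first two bytes into (const_int 0x5678).  */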
5399 static rtx
5400 simplify_immed_subreg (machine_mode outermode, rtx op,
5401 machine_mode innermode, unsigned int byte)
5403 enum {
5404 value_bit = 8,
5405 value_mask = (1 << value_bit) - 1
5407 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5408 int value_start;
5409 int i;
5410 int elem;
5412 int num_elem;
5413 rtx * elems;
5414 int elem_bitsize;
5415 rtx result_s;
5416 rtvec result_v = NULL;
5417 enum mode_class outer_class;
5418 machine_mode outer_submode;
5419 int max_bitsize;
5421 /* Some ports misuse CCmode. */
5422 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5423 return op;
5425 /* We have no way to represent a complex constant at the rtl level. */
5426 if (COMPLEX_MODE_P (outermode))
5427 return NULL_RTX;
5429 /* We support any size mode. */
5430 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5431 GET_MODE_BITSIZE (innermode));
5433 /* Unpack the value. */
5435 if (GET_CODE (op) == CONST_VECTOR)
5437 num_elem = CONST_VECTOR_NUNITS (op);
5438 elems = &CONST_VECTOR_ELT (op, 0);
5439 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5441 else
5443 num_elem = 1;
5444 elems = &op;
5445 elem_bitsize = max_bitsize;
5447 /* If this asserts, it is too complicated; reducing value_bit may help. */
5448 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5449 /* I don't know how to handle endianness of sub-units. */
5450 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5452 for (elem = 0; elem < num_elem; elem++)
5454 unsigned char * vp;
5455 rtx el = elems[elem];
5457 /* Vectors are kept in target memory order. (This is probably
5458 a mistake.) */
5460 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5461 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5462 / BITS_PER_UNIT);
5463 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5464 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5465 unsigned bytele = (subword_byte % UNITS_PER_WORD
5466 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5467 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5470 switch (GET_CODE (el))
5472 case CONST_INT:
5473 for (i = 0;
5474 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5475 i += value_bit)
5476 *vp++ = INTVAL (el) >> i;
5477 /* CONST_INTs are always logically sign-extended. */
5478 for (; i < elem_bitsize; i += value_bit)
5479 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5480 break;
5482 case CONST_WIDE_INT:
5484 rtx_mode_t val = std::make_pair (el, innermode);
5485 unsigned char extend = wi::sign_mask (val);
5487 for (i = 0; i < elem_bitsize; i += value_bit)
5488 *vp++ = wi::extract_uhwi (val, i, value_bit);
5489 for (; i < elem_bitsize; i += value_bit)
5490 *vp++ = extend;
5492 break;
5494 case CONST_DOUBLE:
5495 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5497 unsigned char extend = 0;
5498 /* If this triggers, someone should have generated a
5499 CONST_INT instead. */
5500 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5502 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5503 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5504 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5506 *vp++
5507 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5508 i += value_bit;
5511 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5512 extend = -1;
5513 for (; i < elem_bitsize; i += value_bit)
5514 *vp++ = extend;
5516 else
5518 /* This is big enough for anything on the platform. */
5519 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5520 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5522 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5523 gcc_assert (bitsize <= elem_bitsize);
5524 gcc_assert (bitsize % value_bit == 0);
5526 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5527 GET_MODE (el));
5529 /* real_to_target produces its result in words affected by
5530 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5531 and use WORDS_BIG_ENDIAN instead; see the documentation
5532 of SUBREG in rtl.texi. */
5533 for (i = 0; i < bitsize; i += value_bit)
5535 int ibase;
5536 if (WORDS_BIG_ENDIAN)
5537 ibase = bitsize - 1 - i;
5538 else
5539 ibase = i;
5540 *vp++ = tmp[ibase / 32] >> i % 32;
5543 /* It shouldn't matter what's done here, so fill it with
5544 zero. */
5545 for (; i < elem_bitsize; i += value_bit)
5546 *vp++ = 0;
5548 break;
5550 case CONST_FIXED:
5551 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5553 for (i = 0; i < elem_bitsize; i += value_bit)
5554 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5556 else
5558 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5559 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5560 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5561 i += value_bit)
5562 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5563 >> (i - HOST_BITS_PER_WIDE_INT);
5564 for (; i < elem_bitsize; i += value_bit)
5565 *vp++ = 0;
5567 break;
5569 default:
5570 gcc_unreachable ();
5574 /* Now, pick the right byte to start with. */
5575 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5576 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5577 will already have offset 0. */
5578 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5580 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5581 - byte);
5582 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5583 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5584 byte = (subword_byte % UNITS_PER_WORD
5585 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5588 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5589 so if it's become negative it will instead be very large.) */
5590 gcc_assert (byte < GET_MODE_SIZE (innermode));
5592 /* Convert from bytes to chunks of size value_bit. */
5593 value_start = byte * (BITS_PER_UNIT / value_bit);
5595 /* Re-pack the value. */
5597 if (VECTOR_MODE_P (outermode))
5599 num_elem = GET_MODE_NUNITS (outermode);
5600 result_v = rtvec_alloc (num_elem);
5601 elems = &RTVEC_ELT (result_v, 0);
5602 outer_submode = GET_MODE_INNER (outermode);
5604 else
5606 num_elem = 1;
5607 elems = &result_s;
5608 outer_submode = outermode;
5611 outer_class = GET_MODE_CLASS (outer_submode);
5612 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5614 gcc_assert (elem_bitsize % value_bit == 0);
5615 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5617 for (elem = 0; elem < num_elem; elem++)
5619 unsigned char *vp;
5621 /* Vectors are stored in target memory order. (This is probably
5622 a mistake.) */
5624 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5625 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5626 / BITS_PER_UNIT);
5627 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5628 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5629 unsigned bytele = (subword_byte % UNITS_PER_WORD
5630 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5631 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5634 switch (outer_class)
5636 case MODE_INT:
5637 case MODE_PARTIAL_INT:
5639 int u;
5640 int base = 0;
5641 int units
5642 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5643 / HOST_BITS_PER_WIDE_INT;
5644 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5645 wide_int r;
5647 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5648 return NULL_RTX;
5649 for (u = 0; u < units; u++)
5651 unsigned HOST_WIDE_INT buf = 0;
5652 for (i = 0;
5653 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5654 i += value_bit)
5655 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5657 tmp[u] = buf;
5658 base += HOST_BITS_PER_WIDE_INT;
5660 r = wide_int::from_array (tmp, units,
5661 GET_MODE_PRECISION (outer_submode));
5662 #if TARGET_SUPPORTS_WIDE_INT == 0
5663 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5664 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5665 return NULL_RTX;
5666 #endif
5667 elems[elem] = immed_wide_int_const (r, outer_submode);
5669 break;
5671 case MODE_FLOAT:
5672 case MODE_DECIMAL_FLOAT:
5674 REAL_VALUE_TYPE r;
5675 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5677 /* real_from_target wants its input in words affected by
5678 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5679 and use WORDS_BIG_ENDIAN instead; see the documentation
5680 of SUBREG in rtl.texi. */
5681 for (i = 0; i < max_bitsize / 32; i++)
5682 tmp[i] = 0;
5683 for (i = 0; i < elem_bitsize; i += value_bit)
5685 int ibase;
5686 if (WORDS_BIG_ENDIAN)
5687 ibase = elem_bitsize - 1 - i;
5688 else
5689 ibase = i;
5690 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5693 real_from_target (&r, tmp, outer_submode);
5694 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5696 break;
5698 case MODE_FRACT:
5699 case MODE_UFRACT:
5700 case MODE_ACCUM:
5701 case MODE_UACCUM:
5703 FIXED_VALUE_TYPE f;
5704 f.data.low = 0;
5705 f.data.high = 0;
5706 f.mode = outer_submode;
5708 for (i = 0;
5709 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5710 i += value_bit)
5711 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5712 for (; i < elem_bitsize; i += value_bit)
5713 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5714 << (i - HOST_BITS_PER_WIDE_INT));
5716 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5718 break;
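/* Illustrative example (assuming value_bit == 8 and a 64-bit
   HOST_WIDE_INT): a 32-bit ACCUM element is assembled entirely by the
   first loop, f.data.low = b0 | b1 << 8 | b2 << 16 | b3 << 24, and
   f.data.high stays 0; only elements wider than
   HOST_BITS_PER_WIDE_INT spill into f.data.high.  */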
5720 default:
5721 gcc_unreachable ();
5724 if (VECTOR_MODE_P (outermode))
5725 return gen_rtx_CONST_VECTOR (outermode, result_v);
5726 else
5727 return result_s;
5730 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5731 Return 0 if no simplifications are possible. */
5732 rtx
5733 simplify_subreg (machine_mode outermode, rtx op,
5734 machine_mode innermode, unsigned int byte)
5736 /* Little bit of sanity checking. */
5737 gcc_assert (innermode != VOIDmode);
5738 gcc_assert (outermode != VOIDmode);
5739 gcc_assert (innermode != BLKmode);
5740 gcc_assert (outermode != BLKmode);
5742 gcc_assert (GET_MODE (op) == innermode
5743 || GET_MODE (op) == VOIDmode);
5745 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5746 return NULL_RTX;
5748 if (byte >= GET_MODE_SIZE (innermode))
5749 return NULL_RTX;
5751 if (outermode == innermode && !byte)
5752 return op;
5754 if (CONST_SCALAR_INT_P (op)
5755 || CONST_DOUBLE_AS_FLOAT_P (op)
5756 || GET_CODE (op) == CONST_FIXED
5757 || GET_CODE (op) == CONST_VECTOR)
5758 return simplify_immed_subreg (outermode, op, innermode, byte);
5760 /* Changing mode twice with SUBREG => just change it once,
5761 or not at all if changing back to op's starting mode. */
5762 if (GET_CODE (op) == SUBREG)
5764 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5765 int final_offset = byte + SUBREG_BYTE (op);
5766 rtx newx;
5768 if (outermode == innermostmode
5769 && byte == 0 && SUBREG_BYTE (op) == 0)
5770 return SUBREG_REG (op);
5772 /* The SUBREG_BYTE represents the offset, as if the value were stored
5773 in memory.  The irritating exception is a paradoxical subreg, where
5774 we define SUBREG_BYTE to be 0.  On big-endian machines, this
5775 value should be negative.  For a moment, undo this exception. */
5776 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5778 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5779 if (WORDS_BIG_ENDIAN)
5780 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5781 if (BYTES_BIG_ENDIAN)
5782 final_offset += difference % UNITS_PER_WORD;
5784 if (SUBREG_BYTE (op) == 0
5785 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5787 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5788 if (WORDS_BIG_ENDIAN)
5789 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5790 if (BYTES_BIG_ENDIAN)
5791 final_offset += difference % UNITS_PER_WORD;
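/* Worked example of the adjustment above (illustrative assumptions:
   UNITS_PER_WORD == 4, outermode == DImode, innermode == SImode and
   byte == 0, i.e. a paradoxical subreg).  Here
       difference = 4 - 8 = -4;
   on a WORDS_BIG_ENDIAN target final_offset gains (-4 / 4) * 4 = -4,
   and on a BYTES_BIG_ENDIAN target it gains -4 % 4 = 0, so the offset
   becomes SUBREG_BYTE (op) - 4 -- the negative offset that the
   paradoxical subreg hides.  On a little-endian target neither
   adjustment fires and final_offset is unchanged.  */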
5794 /* See whether resulting subreg will be paradoxical. */
5795 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5797 /* In nonparadoxical subregs we can't handle negative offsets. */
5798 if (final_offset < 0)
5799 return NULL_RTX;
5800 /* Bail out in case resulting subreg would be incorrect. */
5801 if (final_offset % GET_MODE_SIZE (outermode)
5802 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5803 return NULL_RTX;
5805 else
5807 int offset = 0;
5808 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5810 /* In a paradoxical subreg, see if we are still looking at the lower
5811 part.  If so, our SUBREG_BYTE will be 0. */
5812 if (WORDS_BIG_ENDIAN)
5813 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5814 if (BYTES_BIG_ENDIAN)
5815 offset += difference % UNITS_PER_WORD;
5816 if (offset == final_offset)
5817 final_offset = 0;
5818 else
5819 return NULL_RTX;
5822 /* Recurse for further possible simplifications. */
5823 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5824 final_offset);
5825 if (newx)
5826 return newx;
5827 if (validate_subreg (outermode, innermostmode,
5828 SUBREG_REG (op), final_offset))
5830 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5831 if (SUBREG_PROMOTED_VAR_P (op)
5832 && SUBREG_PROMOTED_SIGN (op) >= 0
5833 && GET_MODE_CLASS (outermode) == MODE_INT
5834 && IN_RANGE (GET_MODE_SIZE (outermode),
5835 GET_MODE_SIZE (innermode),
5836 GET_MODE_SIZE (innermostmode))
5837 && subreg_lowpart_p (newx))
5839 SUBREG_PROMOTED_VAR_P (newx) = 1;
5840 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5842 return newx;
5844 return NULL_RTX;
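/* Illustrative call (R is a hypothetical pseudo register): simplifying
       (subreg:QI (subreg:HI (reg:SI R) 0) 0),
   i.e. simplify_subreg (QImode, (subreg:HI (reg:SI R) 0), HImode, 0),
   recurses on the SImode register with the combined offset and yields
   (subreg:QI (reg:SI R) 0) rather than a subreg of a subreg; and if
   OUTERMODE equals the innermost mode with both offsets 0, the inner
   register itself is returned.  */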
5847 /* SUBREG of a hard register => just change the register number
5848 and/or mode. If the hard register is not valid in that mode,
5849 suppress this simplification. If the hard register is the stack,
5850 frame, or argument pointer, leave this as a SUBREG. */
5852 if (REG_P (op) && HARD_REGISTER_P (op))
5854 unsigned int regno, final_regno;
5856 regno = REGNO (op);
5857 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5858 if (HARD_REGISTER_NUM_P (final_regno))
5860 rtx x;
5861 int final_offset = byte;
5863 /* Adjust offset for paradoxical subregs. */
5864 if (byte == 0
5865 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5867 int difference = (GET_MODE_SIZE (innermode)
5868 - GET_MODE_SIZE (outermode));
5869 if (WORDS_BIG_ENDIAN)
5870 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5871 if (BYTES_BIG_ENDIAN)
5872 final_offset += difference % UNITS_PER_WORD;
5875 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5877 /* Propagate the original regno.  We don't have any way to specify
5878 the offset inside the original regno, so do so only for the lowpart.
5879 The information is used only by alias analysis, which cannot
5880 grok a partial register anyway. */
5882 if (subreg_lowpart_offset (outermode, innermode) == byte)
5883 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5884 return x;
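/* Illustrative example (hypothetical little-endian 32-bit target on
   which hard registers 0 and 1 together hold a DImode value):
   simplify_subreg (SImode, (reg:DI 0), DImode, 4) can let
   simplify_subreg_regno map the offset to the second register and
   return (reg:SI 1); ORIGINAL_REGNO is copied over only when BYTE is
   the lowpart offset.  */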
5888 /* If we have a SUBREG of a register that we are replacing and we are
5889 replacing it with a MEM, make a new MEM and try replacing the
5890 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5891 or if we would be widening it. */
5893 if (MEM_P (op)
5894 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5895 /* Allow splitting of volatile memory references in case we don't
5896 have an instruction to move the whole thing. */
5897 && (! MEM_VOLATILE_P (op)
5898 || ! have_insn_for (SET, innermode))
5899 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5900 return adjust_address_nv (op, outermode, byte);
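/* Illustrative example (P is a hypothetical pseudo holding an address
   that is not mode-dependent): simplify_subreg (HImode,
   (mem:SI (reg:SI P)), SImode, 2) is rewritten by adjust_address_nv
   into (mem:HI (plus:SI (reg:SI P) (const_int 2))), provided the MEM
   is not volatile (or there is no instruction that moves a full
   SImode).  */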
5902 /* Handle complex values represented as CONCAT
5903 of real and imaginary part. */
5904 if (GET_CODE (op) == CONCAT)
5906 unsigned int part_size, final_offset;
5907 rtx part, res;
5909 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5910 if (byte < part_size)
5912 part = XEXP (op, 0);
5913 final_offset = byte;
5915 else
5917 part = XEXP (op, 1);
5918 final_offset = byte - part_size;
5921 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5922 return NULL_RTX;
5924 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5925 if (res)
5926 return res;
5927 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5928 return gen_rtx_SUBREG (outermode, part, final_offset);
5929 return NULL_RTX;
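/* Illustrative example (RE and IM are hypothetical registers): with
   OP == (concat:SC (reg:SF RE) (reg:SF IM)), part_size == 4, so
   simplify_subreg (SFmode, OP, SCmode, 0) recurses into the first
   part and returns (reg:SF RE), while a BYTE of 4 selects the second
   part and returns (reg:SF IM).  */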
5932 /* A SUBREG resulting from a zero extension may fold to zero if
5933 it extracts higher bits than the ZERO_EXTEND's source provides. */
5934 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5936 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5937 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5938 return CONST0_RTX (outermode);
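/* Illustrative example (little-endian target, X a hypothetical
   register): for OP == (zero_extend:DI (reg:SI X)), outermode ==
   SImode and byte == 4, subreg_lsb_1 gives bitpos == 32, which is not
   below the 32-bit precision of the source, so the subreg selects
   only the known-zero upper half and CONST0_RTX (SImode) is
   returned.  */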
5941 if (SCALAR_INT_MODE_P (outermode)
5942 && SCALAR_INT_MODE_P (innermode)
5943 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5944 && byte == subreg_lowpart_offset (outermode, innermode))
5946 rtx tem = simplify_truncation (outermode, op, innermode);
5947 if (tem)
5948 return tem;
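/* Illustrative example (little-endian target, X a hypothetical
   register): a lowpart subreg such as
   simplify_subreg (QImode, (zero_extend:SI (reg:QI X)), SImode, 0)
   is treated as a truncation here, and simplify_truncation folds the
   truncation of the extension back to (reg:QI X).  */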
5951 return NULL_RTX;
5954 /* Make a SUBREG operation or equivalent if it folds. */
5956 rtx
5957 simplify_gen_subreg (machine_mode outermode, rtx op,
5958 machine_mode innermode, unsigned int byte)
5960 rtx newx;
5962 newx = simplify_subreg (outermode, op, innermode, byte);
5963 if (newx)
5964 return newx;
5966 if (GET_CODE (op) == SUBREG
5967 || GET_CODE (op) == CONCAT
5968 || GET_MODE (op) == VOIDmode)
5969 return NULL_RTX;
5971 if (validate_subreg (outermode, innermode, op, byte))
5972 return gen_rtx_SUBREG (outermode, op, byte);
5974 return NULL_RTX;
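/* Illustrative uses (R is a hypothetical pseudo register): with a
   constant operand, simplify_gen_subreg (QImode, const0_rtx, SImode, 0)
   folds through simplify_immed_subreg to const0_rtx; with a pseudo,
   simplify_gen_subreg (SImode, (reg:DI R), DImode, 0) finds nothing to
   fold and, since R is neither a SUBREG, a CONCAT nor VOIDmode,
   generates (subreg:SI (reg:DI R) 0) once validate_subreg agrees.  */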
5977 /* Simplify X, an rtx expression.
5979 Return the simplified expression or NULL if no simplifications
5980 were possible.
5982 This is the preferred entry point into the simplification routines;
5983 however, we still allow passes to call the more specific routines.
5985 Right now GCC has three (yes, three) major bodies of RTL simplification
5986 code that need to be unified.
5988 1. fold_rtx in cse.c. This code uses various CSE specific
5989 information to aid in RTL simplification.
5991 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5992 it uses combine specific information to aid in RTL
5993 simplification.
5995 3. The routines in this file.
5998 Long term we want to only have one body of simplification code; to
5999 get to that state I recommend the following steps:
6001 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6002 which do not depend on pass-specific state into these routines.
6004 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6005 use this routine whenever possible.
6007 3. Allow for pass dependent state to be provided to these
6008 routines and add simplifications based on the pass dependent
6009 state. Remove code from cse.c & combine.c that becomes
6010 redundant/dead.
6012 It will take time, but ultimately the compiler will be easier to
6013 maintain and improve. It's totally silly that when we add a
6014 simplification it needs to be added to 4 places (3 for RTL
6015 simplification and 1 for tree simplification). */
6017 rtx
6018 simplify_rtx (const_rtx x)
6020 const enum rtx_code code = GET_CODE (x);
6021 const machine_mode mode = GET_MODE (x);
6023 switch (GET_RTX_CLASS (code))
6025 case RTX_UNARY:
6026 return simplify_unary_operation (code, mode,
6027 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6028 case RTX_COMM_ARITH:
6029 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6030 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6032 /* Fall through.... */
6034 case RTX_BIN_ARITH:
6035 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6037 case RTX_TERNARY:
6038 case RTX_BITFIELD_OPS:
6039 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6040 XEXP (x, 0), XEXP (x, 1),
6041 XEXP (x, 2));
6043 case RTX_COMPARE:
6044 case RTX_COMM_COMPARE:
6045 return simplify_relational_operation (code, mode,
6046 ((GET_MODE (XEXP (x, 0))
6047 != VOIDmode)
6048 ? GET_MODE (XEXP (x, 0))
6049 : GET_MODE (XEXP (x, 1))),
6050 XEXP (x, 0),
6051 XEXP (x, 1));
6053 case RTX_EXTRA:
6054 if (code == SUBREG)
6055 return simplify_subreg (mode, SUBREG_REG (x),
6056 GET_MODE (SUBREG_REG (x)),
6057 SUBREG_BYTE (x));
6058 break;
6060 case RTX_OBJ:
6061 if (code == LO_SUM)
6063 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6064 if (GET_CODE (XEXP (x, 0)) == HIGH
6065 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6066 return XEXP (x, 1);
6068 break;
6070 default:
6071 break;
6073 return NULL;