gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
66 /* Test whether expression, X, is an immediate constant that represents
67 the most significant bit of machine mode MODE. */
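/* For example, in a 32-bit SImode only the constant whose sole set bit is
   the sign bit (value 0x80000000) passes this test.  */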
69 bool
70 mode_signbit_p (machine_mode mode, const_rtx x)
72 unsigned HOST_WIDE_INT val;
73 unsigned int width;
75 if (GET_MODE_CLASS (mode) != MODE_INT)
76 return false;
78 width = GET_MODE_PRECISION (mode);
79 if (width == 0)
80 return false;
82 if (width <= HOST_BITS_PER_WIDE_INT
83 && CONST_INT_P (x))
84 val = INTVAL (x);
85 #if TARGET_SUPPORTS_WIDE_INT
86 else if (CONST_WIDE_INT_P (x))
88 unsigned int i;
89 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
90 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
91 return false;
92 for (i = 0; i < elts - 1; i++)
93 if (CONST_WIDE_INT_ELT (x, i) != 0)
94 return false;
95 val = CONST_WIDE_INT_ELT (x, elts - 1);
96 width %= HOST_BITS_PER_WIDE_INT;
97 if (width == 0)
98 width = HOST_BITS_PER_WIDE_INT;
100 #else
101 else if (width <= HOST_BITS_PER_DOUBLE_INT
102 && CONST_DOUBLE_AS_INT_P (x)
103 && CONST_DOUBLE_LOW (x) == 0)
105 val = CONST_DOUBLE_HIGH (x);
106 width -= HOST_BITS_PER_WIDE_INT;
108 #endif
109 else
110 /* X is not an integer constant. */
111 return false;
113 if (width < HOST_BITS_PER_WIDE_INT)
114 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
115 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
118 /* Test whether VAL is equal to the most significant bit of mode MODE
119 (after masking with the mode mask of MODE). Returns false if the
120 precision of MODE is too large to handle. */
122 bool
123 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
125 unsigned int width;
127 if (GET_MODE_CLASS (mode) != MODE_INT)
128 return false;
130 width = GET_MODE_PRECISION (mode);
131 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
132 return false;
134 val &= GET_MODE_MASK (mode);
135 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
138 /* Test whether the most significant bit of mode MODE is set in VAL.
139 Returns false if the precision of MODE is too large to handle. */
140 bool
141 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
143 unsigned int width;
145 if (GET_MODE_CLASS (mode) != MODE_INT)
146 return false;
148 width = GET_MODE_PRECISION (mode);
149 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
150 return false;
152 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
153 return val != 0;
156 /* Test whether the most significant bit of mode MODE is clear in VAL.
157 Returns false if the precision of MODE is too large to handle. */
158 bool
159 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
161 unsigned int width;
163 if (GET_MODE_CLASS (mode) != MODE_INT)
164 return false;
166 width = GET_MODE_PRECISION (mode);
167 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
168 return false;
170 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
171 return val == 0;
174 /* Make a binary operation by properly ordering the operands and
175 seeing if the expression folds. */
178 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
179 rtx op1)
181 rtx tem;
183 /* If this simplifies, do it. */
184 tem = simplify_binary_operation (code, mode, op0, op1);
185 if (tem)
186 return tem;
188 /* Put complex operands first and constants second if commutative. */
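/* E.g. (plus (const_int 4) (reg:SI 1)) is rebuilt as
   (plus (reg:SI 1) (const_int 4)).  */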
189 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
190 && swap_commutative_operands_p (op0, op1))
191 std::swap (op0, op1);
193 return gen_rtx_fmt_ee (code, mode, op0, op1);
196 /* If X is a MEM referencing the constant pool, return the real value.
197 Otherwise return X. */
199 avoid_constant_pool_reference (rtx x)
201 rtx c, tmp, addr;
202 machine_mode cmode;
203 HOST_WIDE_INT offset = 0;
205 switch (GET_CODE (x))
207 case MEM:
208 break;
210 case FLOAT_EXTEND:
211 /* Handle float extensions of constant pool references. */
212 tmp = XEXP (x, 0);
213 c = avoid_constant_pool_reference (tmp);
214 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
215 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
216 GET_MODE (x));
217 return x;
219 default:
220 return x;
223 if (GET_MODE (x) == BLKmode)
224 return x;
226 addr = XEXP (x, 0);
228 /* Call target hook to avoid the effects of -fpic etc.... */
229 addr = targetm.delegitimize_address (addr);
231 /* Split the address into a base and integer offset. */
232 if (GET_CODE (addr) == CONST
233 && GET_CODE (XEXP (addr, 0)) == PLUS
234 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
236 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
237 addr = XEXP (XEXP (addr, 0), 0);
240 if (GET_CODE (addr) == LO_SUM)
241 addr = XEXP (addr, 1);
243 /* If this is a constant pool reference, we can turn it into its
244 constant and hope that simplifications happen. */
245 if (GET_CODE (addr) == SYMBOL_REF
246 && CONSTANT_POOL_ADDRESS_P (addr))
248 c = get_pool_constant (addr);
249 cmode = get_pool_mode (addr);
251 /* If we're accessing the constant in a different mode than it was
252 originally stored, attempt to fix that up via subreg simplifications.
253 If that fails we have no choice but to return the original memory. */
254 if ((offset != 0 || cmode != GET_MODE (x))
255 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
257 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
258 if (tem && CONSTANT_P (tem))
259 return tem;
261 else
262 return c;
265 return x;
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep, false);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && TREE_CODE (decl) == VAR_DECL
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies mode comparison is done in. */
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419 X, if it returns non-NULL, replace X with its return value and simplify the
420 result. */
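/* For example, simplify_replace_fn_rtx (x, reg, NULL, const0_rtx) replaces
   every occurrence of REG within X by (const_int 0) and folds the result.  */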
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
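/* E.g. (truncate:QI (sign_extend:DI (reg:HI))) becomes
   (truncate:QI (reg:HI)), while (truncate:SI (zero_extend:DI (reg:HI)))
   becomes (zero_extend:SI (reg:HI)).  */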
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
676 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
677 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
678 the outer subreg is effectively a truncation to the original mode. */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
694 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
695 the outer subreg is effectively a truncation to the original mode. */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
706 (ashift:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
718 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
719 and C2. */
720 if (GET_CODE (op) == AND
721 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
722 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
723 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
724 && CONST_INT_P (XEXP (op, 1)))
726 rtx op0 = (XEXP (XEXP (op, 0), 0));
727 rtx shift_op = XEXP (XEXP (op, 0), 1);
728 rtx mask_op = XEXP (op, 1);
729 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
730 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 if (shift < precision
733 /* If doing this transform works for an X with all bits set,
734 it works for any X. */
735 && ((GET_MODE_MASK (mode) >> shift) & mask)
736 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
737 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
738 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
741 return simplify_gen_binary (AND, mode, op0, mask_op);
745 /* Recognize a word extraction from a multi-word subreg. */
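/* E.g. on a target with 32-bit words,
   (truncate:SI (lshiftrt:DI (reg:DI) (const_int 32))) becomes a subreg
   selecting the most significant word of the DImode register.  */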
746 if ((GET_CODE (op) == LSHIFTRT
747 || GET_CODE (op) == ASHIFTRT)
748 && SCALAR_INT_MODE_P (mode)
749 && SCALAR_INT_MODE_P (op_mode)
750 && precision >= BITS_PER_WORD
751 && 2 * precision <= op_precision
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
754 && UINTVAL (XEXP (op, 1)) < op_precision)
756 int byte = subreg_lowpart_offset (mode, op_mode);
757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
758 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
759 (WORDS_BIG_ENDIAN
760 ? byte - shifted_bytes
761 : byte + shifted_bytes));
764 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
765 and try replacing the TRUNCATE and shift with it. Don't do this
766 if the MEM has a mode-dependent address. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (op_mode)
770 && MEM_P (XEXP (op, 0))
771 && CONST_INT_P (XEXP (op, 1))
772 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
773 && INTVAL (XEXP (op, 1)) > 0
774 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
775 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
776 MEM_ADDR_SPACE (XEXP (op, 0)))
777 && ! MEM_VOLATILE_P (XEXP (op, 0))
778 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
779 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
781 int byte = subreg_lowpart_offset (mode, op_mode);
782 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
783 return adjust_address_nv (XEXP (op, 0), mode,
784 (WORDS_BIG_ENDIAN
785 ? byte - shifted_bytes
786 : byte + shifted_bytes));
789 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
790 (OP:SI foo:SI) if OP is NEG or ABS. */
791 if ((GET_CODE (op) == ABS
792 || GET_CODE (op) == NEG)
793 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
794 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
796 return simplify_gen_unary (GET_CODE (op), mode,
797 XEXP (XEXP (op, 0), 0), mode);
799 /* (truncate:A (subreg:B (truncate:C X) 0)) is
800 (truncate:A X). */
801 if (GET_CODE (op) == SUBREG
802 && SCALAR_INT_MODE_P (mode)
803 && SCALAR_INT_MODE_P (op_mode)
804 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
805 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
806 && subreg_lowpart_p (op))
808 rtx inner = XEXP (SUBREG_REG (op), 0);
809 if (GET_MODE_PRECISION (mode)
810 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
811 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
812 else
813 /* If subreg above is paradoxical and C is narrower
814 than A, return (subreg:A (truncate:C X) 0). */
815 return simplify_gen_subreg (mode, SUBREG_REG (op),
816 GET_MODE (SUBREG_REG (op)), 0);
819 /* (truncate:A (truncate:B X)) is (truncate:A X). */
820 if (GET_CODE (op) == TRUNCATE)
821 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 return NULL_RTX;
827 /* Try to simplify a unary operation CODE whose output mode is to be
828 MODE with input operand OP whose mode was originally OP_MODE.
829 Return zero if no simplification can be made. */
831 simplify_unary_operation (enum rtx_code code, machine_mode mode,
832 rtx op, machine_mode op_mode)
834 rtx trueop, tem;
836 trueop = avoid_constant_pool_reference (op);
838 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
839 if (tem)
840 return tem;
842 return simplify_unary_operation_1 (code, mode, op);
845 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
846 to be exact. */
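/* E.g. on IEEE targets an SImode value known to have at most 24 significant
   bits converts exactly to SFmode, whose significand is 24 bits wide.  */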
848 static bool
849 exact_int_to_float_conversion_p (const_rtx op)
851 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
852 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
853 /* Constants shouldn't reach here. */
854 gcc_assert (op0_mode != VOIDmode);
855 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
856 int in_bits = in_prec;
857 if (HWI_COMPUTABLE_MODE_P (op0_mode))
859 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
860 if (GET_CODE (op) == FLOAT)
861 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
862 else if (GET_CODE (op) == UNSIGNED_FLOAT)
863 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
864 else
865 gcc_unreachable ();
866 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
868 return in_bits <= out_bits;
871 /* Perform some simplifications we can do even if the operands
872 aren't constant. */
873 static rtx
874 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
876 enum rtx_code reversed;
877 rtx temp;
879 switch (code)
881 case NOT:
882 /* (not (not X)) == X. */
883 if (GET_CODE (op) == NOT)
884 return XEXP (op, 0);
886 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
887 comparison is all ones. */
888 if (COMPARISON_P (op)
889 && (mode == BImode || STORE_FLAG_VALUE == -1)
890 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
891 return simplify_gen_relational (reversed, mode, VOIDmode,
892 XEXP (op, 0), XEXP (op, 1));
894 /* (not (plus X -1)) can become (neg X). */
895 if (GET_CODE (op) == PLUS
896 && XEXP (op, 1) == constm1_rtx)
897 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
899 /* Similarly, (not (neg X)) is (plus X -1). */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
902 CONSTM1_RTX (mode));
904 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
905 if (GET_CODE (op) == XOR
906 && CONST_INT_P (XEXP (op, 1))
907 && (temp = simplify_unary_operation (NOT, mode,
908 XEXP (op, 1), mode)) != 0)
909 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
911 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
912 if (GET_CODE (op) == PLUS
913 && CONST_INT_P (XEXP (op, 1))
914 && mode_signbit_p (mode, XEXP (op, 1))
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1), mode)) != 0)
917 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
924 bother with. */
925 if (GET_CODE (op) == ASHIFT
926 && XEXP (op, 0) == const1_rtx)
928 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
929 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
933 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
934 so we can perform the above simplification. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_CODE (op) == ASHIFTRT
937 && CONST_INT_P (XEXP (op, 1))
938 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
939 return simplify_gen_relational (GE, mode, VOIDmode,
940 XEXP (op, 0), const0_rtx);
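/* Likewise for (not (subreg (ashift 1 X) 0)): rotate the inverted constant
   in the inner mode and take the lowpart of the result.  */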
943 if (GET_CODE (op) == SUBREG
944 && subreg_lowpart_p (op)
945 && (GET_MODE_SIZE (GET_MODE (op))
946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
947 && GET_CODE (SUBREG_REG (op)) == ASHIFT
948 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
950 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
951 rtx x;
953 x = gen_rtx_ROTATE (inner_mode,
954 simplify_gen_unary (NOT, inner_mode, const1_rtx,
955 inner_mode),
956 XEXP (SUBREG_REG (op), 1));
957 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
958 if (temp)
959 return temp;
962 /* Apply De Morgan's laws to reduce number of patterns for machines
963 with negating logical insns (and-not, nand, etc.). If result has
964 only one NOT, put it first, since that is how the patterns are
965 coded. */
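/* E.g. (not (ior X Y)) becomes (and (not X) (not Y)).  */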
966 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
968 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
969 machine_mode op_mode;
971 op_mode = GET_MODE (in1);
972 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
974 op_mode = GET_MODE (in2);
975 if (op_mode == VOIDmode)
976 op_mode = mode;
977 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
979 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
980 std::swap (in1, in2);
982 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
983 mode, in1, in2);
986 /* (not (bswap x)) -> (bswap (not x)). */
987 if (GET_CODE (op) == BSWAP)
989 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990 return simplify_gen_unary (BSWAP, mode, x, mode);
992 break;
994 case NEG:
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
997 return XEXP (op, 0);
999 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1000 If comparison is not reversible use
1001 x ? y : (neg y). */
1002 if (GET_CODE (op) == IF_THEN_ELSE)
1004 rtx cond = XEXP (op, 0);
1005 rtx true_rtx = XEXP (op, 1);
1006 rtx false_rtx = XEXP (op, 2);
1008 if ((GET_CODE (true_rtx) == NEG
1009 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1010 || (GET_CODE (false_rtx) == NEG
1011 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1013 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1014 temp = reversed_comparison (cond, mode);
1015 else
1017 temp = cond;
1018 std::swap (true_rtx, false_rtx);
1020 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1021 mode, temp, true_rtx, false_rtx);
1025 /* (neg (plus X 1)) can become (not X). */
1026 if (GET_CODE (op) == PLUS
1027 && XEXP (op, 1) == const1_rtx)
1028 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1030 /* Similarly, (neg (not X)) is (plus X 1). */
1031 if (GET_CODE (op) == NOT)
1032 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1033 CONST1_RTX (mode));
1035 /* (neg (minus X Y)) can become (minus Y X). This transformation
1036 isn't safe for modes with signed zeros, since if X and Y are
1037 both +0, (minus Y X) is the same as (minus X Y). If the
1038 rounding mode is towards +infinity (or -infinity) then the two
1039 expressions will be rounded differently. */
1040 if (GET_CODE (op) == MINUS
1041 && !HONOR_SIGNED_ZEROS (mode)
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1045 if (GET_CODE (op) == PLUS
1046 && !HONOR_SIGNED_ZEROS (mode)
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1049 /* (neg (plus A C)) is simplified to (minus -C A). */
1050 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1051 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1054 if (temp)
1055 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1058 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1059 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1060 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1063 /* (neg (mult A B)) becomes (mult A (neg B)).
1064 This works even for floating-point values. */
1065 if (GET_CODE (op) == MULT
1066 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1069 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1072 /* NEG commutes with ASHIFT since it is multiplication. Only do
1073 this if we can then eliminate the NEG (e.g., if the operand
1074 is a constant). */
1075 if (GET_CODE (op) == ASHIFT)
1077 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1078 if (temp)
1079 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1082 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1083 C is equal to the width of MODE minus 1. */
1084 if (GET_CODE (op) == ASHIFTRT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1087 return simplify_gen_binary (LSHIFTRT, mode,
1088 XEXP (op, 0), XEXP (op, 1));
1090 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1091 C is equal to the width of MODE minus 1. */
1092 if (GET_CODE (op) == LSHIFTRT
1093 && CONST_INT_P (XEXP (op, 1))
1094 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1095 return simplify_gen_binary (ASHIFTRT, mode,
1096 XEXP (op, 0), XEXP (op, 1));
1098 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1099 if (GET_CODE (op) == XOR
1100 && XEXP (op, 1) == const1_rtx
1101 && nonzero_bits (XEXP (op, 0), mode) == 1)
1102 return plus_constant (mode, XEXP (op, 0), -1);
1104 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1105 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1106 if (GET_CODE (op) == LT
1107 && XEXP (op, 1) == const0_rtx
1108 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1110 machine_mode inner = GET_MODE (XEXP (op, 0));
1111 int isize = GET_MODE_PRECISION (inner);
1112 if (STORE_FLAG_VALUE == 1)
1114 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1115 GEN_INT (isize - 1));
1116 if (mode == inner)
1117 return temp;
1118 if (GET_MODE_PRECISION (mode) > isize)
1119 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1120 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1122 else if (STORE_FLAG_VALUE == -1)
1124 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1125 GEN_INT (isize - 1));
1126 if (mode == inner)
1127 return temp;
1128 if (GET_MODE_PRECISION (mode) > isize)
1129 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1130 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1133 break;
1135 case TRUNCATE:
1136 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1137 with the umulXi3_highpart patterns. */
1138 if (GET_CODE (op) == LSHIFTRT
1139 && GET_CODE (XEXP (op, 0)) == MULT)
1140 break;
1142 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1144 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1146 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1147 if (temp)
1148 return temp;
1150 /* We can't handle truncation to a partial integer mode here
1151 because we don't know the real bitsize of the partial
1152 integer mode. */
1153 break;
1156 if (GET_MODE (op) != VOIDmode)
1158 temp = simplify_truncation (mode, op, GET_MODE (op));
1159 if (temp)
1160 return temp;
1163 /* If we know that the value is already truncated, we can
1164 replace the TRUNCATE with a SUBREG. */
1165 if (GET_MODE_NUNITS (mode) == 1
1166 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1167 || truncated_to_mode (mode, op)))
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a comparison can be replaced with a subreg if
1175 STORE_FLAG_VALUE permits. This is like the previous test,
1176 but it works even if the comparison is done in a mode larger
1177 than HOST_BITS_PER_WIDE_INT. */
1178 if (HWI_COMPUTABLE_MODE_P (mode)
1179 && COMPARISON_P (op)
1180 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1187 /* A truncate of a memory is just loading the low part of the memory
1188 if we are not changing the meaning of the address. */
1189 if (GET_CODE (op) == MEM
1190 && !VECTOR_MODE_P (mode)
1191 && !MEM_VOLATILE_P (op)
1192 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1194 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1195 if (temp)
1196 return temp;
1199 break;
1201 case FLOAT_TRUNCATE:
1202 if (DECIMAL_FLOAT_MODE_P (mode))
1203 break;
1205 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1206 if (GET_CODE (op) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (op, 0)) == mode)
1208 return XEXP (op, 0);
1210 /* (float_truncate:SF (float_truncate:DF foo:XF))
1211 = (float_truncate:SF foo:XF).
1212 This may eliminate double rounding, so it is unsafe.
1214 (float_truncate:SF (float_extend:XF foo:DF))
1215 = (float_truncate:SF foo:DF).
1217 (float_truncate:DF (float_extend:XF foo:SF))
1218 = (float_extend:DF foo:SF). */
1219 if ((GET_CODE (op) == FLOAT_TRUNCATE
1220 && flag_unsafe_math_optimizations)
1221 || GET_CODE (op) == FLOAT_EXTEND)
1222 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1223 0)))
1224 > GET_MODE_SIZE (mode)
1225 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1226 mode,
1227 XEXP (op, 0), mode);
1229 /* (float_truncate (float x)) is (float x) */
1230 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1231 && (flag_unsafe_math_optimizations
1232 || exact_int_to_float_conversion_p (op)))
1233 return simplify_gen_unary (GET_CODE (op), mode,
1234 XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1237 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1238 (OP:SF foo:SF) if OP is NEG or ABS. */
1239 if ((GET_CODE (op) == ABS
1240 || GET_CODE (op) == NEG)
1241 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1242 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1243 return simplify_gen_unary (GET_CODE (op), mode,
1244 XEXP (XEXP (op, 0), 0), mode);
1246 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1247 is (float_truncate:SF x). */
1248 if (GET_CODE (op) == SUBREG
1249 && subreg_lowpart_p (op)
1250 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1251 return SUBREG_REG (op);
1252 break;
1254 case FLOAT_EXTEND:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_extend (float_extend x)) is (float_extend x)
1260 (float_extend (float x)) is (float x) assuming that double
1261 rounding can't happen. */
1263 if (GET_CODE (op) == FLOAT_EXTEND
1264 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1265 && exact_int_to_float_conversion_p (op)))
1266 return simplify_gen_unary (GET_CODE (op), mode,
1267 XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case ABS:
1273 /* (abs (neg <foo>)) -> (abs <foo>) */
1274 if (GET_CODE (op) == NEG)
1275 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1279 do nothing. */
1280 if (GET_MODE (op) == VOIDmode)
1281 break;
1283 /* If operand is something known to be positive, ignore the ABS. */
1284 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1285 || val_signbit_known_clear_p (GET_MODE (op),
1286 nonzero_bits (op, GET_MODE (op))))
1287 return op;
1289 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1290 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1291 return gen_rtx_NEG (mode, op);
1293 break;
1295 case FFS:
1296 /* (ffs (*_extend <X>)) = (ffs <X>) */
1297 if (GET_CODE (op) == SIGN_EXTEND
1298 || GET_CODE (op) == ZERO_EXTEND)
1299 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 case POPCOUNT:
1304 switch (GET_CODE (op))
1306 case BSWAP:
1307 case ZERO_EXTEND:
1308 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1309 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1312 case ROTATE:
1313 case ROTATERT:
1314 /* Rotations don't affect popcount. */
1315 if (!side_effects_p (XEXP (op, 1)))
1316 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318 break;
1320 default:
1321 break;
1323 break;
1325 case PARITY:
1326 switch (GET_CODE (op))
1328 case NOT:
1329 case BSWAP:
1330 case ZERO_EXTEND:
1331 case SIGN_EXTEND:
1332 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1333 GET_MODE (XEXP (op, 0)));
1335 case ROTATE:
1336 case ROTATERT:
1337 /* Rotations don't affect parity. */
1338 if (!side_effects_p (XEXP (op, 1)))
1339 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 break;
1343 default:
1344 break;
1346 break;
1348 case BSWAP:
1349 /* (bswap (bswap x)) -> x. */
1350 if (GET_CODE (op) == BSWAP)
1351 return XEXP (op, 0);
1352 break;
1354 case FLOAT:
1355 /* (float (sign_extend <X>)) = (float <X>). */
1356 if (GET_CODE (op) == SIGN_EXTEND)
1357 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1358 GET_MODE (XEXP (op, 0)));
1359 break;
1361 case SIGN_EXTEND:
1362 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1363 becomes just the MINUS if its mode is MODE. This allows
1364 folding switch statements on machines using casesi (such as
1365 the VAX). */
1366 if (GET_CODE (op) == TRUNCATE
1367 && GET_MODE (XEXP (op, 0)) == mode
1368 && GET_CODE (XEXP (op, 0)) == MINUS
1369 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1370 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1371 return XEXP (op, 0);
1373 /* Extending a widening multiplication should be canonicalized to
1374 a wider widening multiplication. */
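/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI))
   (sign_extend:SI (reg:HI)))) becomes
   (mult:DI (sign_extend:DI (reg:HI)) (sign_extend:DI (reg:HI)))
   when the product cannot overflow SImode.  */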
1375 if (GET_CODE (op) == MULT)
1377 rtx lhs = XEXP (op, 0);
1378 rtx rhs = XEXP (op, 1);
1379 enum rtx_code lcode = GET_CODE (lhs);
1380 enum rtx_code rcode = GET_CODE (rhs);
1382 /* Widening multiplies usually extend both operands, but sometimes
1383 they use a shift to extract a portion of a register. */
1384 if ((lcode == SIGN_EXTEND
1385 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1386 && (rcode == SIGN_EXTEND
1387 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1389 machine_mode lmode = GET_MODE (lhs);
1390 machine_mode rmode = GET_MODE (rhs);
1391 int bits;
1393 if (lcode == ASHIFTRT)
1394 /* Number of bits not shifted off the end. */
1395 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1396 else /* lcode == SIGN_EXTEND */
1397 /* Size of inner mode. */
1398 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1400 if (rcode == ASHIFTRT)
1401 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1402 else /* rcode == SIGN_EXTEND */
1403 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1405 /* We can only widen multiplies if the result is mathematically
1406 equivalent. I.e. if overflow was impossible. */
1407 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1408 return simplify_gen_binary
1409 (MULT, mode,
1410 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1411 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1415 /* Check for a sign extension of a subreg of a promoted
1416 variable, where the promotion is sign-extended, and the
1417 target mode is the same as the variable's promotion. */
1418 if (GET_CODE (op) == SUBREG
1419 && SUBREG_PROMOTED_VAR_P (op)
1420 && SUBREG_PROMOTED_SIGNED_P (op)
1421 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1423 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1424 if (temp)
1425 return temp;
1428 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1429 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1430 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1432 gcc_assert (GET_MODE_PRECISION (mode)
1433 > GET_MODE_PRECISION (GET_MODE (op)));
1434 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1438 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (sign_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits.
1441 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1442 is similarly (zero_extend:M (subreg:O <X>)). */
1443 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1444 && GET_CODE (XEXP (op, 0)) == ASHIFT
1445 && CONST_INT_P (XEXP (op, 1))
1446 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1447 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1449 machine_mode tmode
1450 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1451 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1452 gcc_assert (GET_MODE_BITSIZE (mode)
1453 > GET_MODE_BITSIZE (GET_MODE (op)));
1454 if (tmode != BLKmode)
1456 rtx inner =
1457 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1458 if (inner)
1459 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1460 ? SIGN_EXTEND : ZERO_EXTEND,
1461 mode, inner, tmode);
1465 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1466 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1467 if (GET_CODE (op) == LSHIFTRT
1468 && CONST_INT_P (XEXP (op, 1))
1469 && XEXP (op, 1) != const0_rtx)
1470 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1472 #if defined(POINTERS_EXTEND_UNSIGNED)
1473 /* As we do not know which address space the pointer is referring to,
1474 we can do this only if the target does not support different pointer
1475 or address modes depending on the address space. */
1476 if (target_default_pointer_address_modes_p ()
1477 && ! POINTERS_EXTEND_UNSIGNED
1478 && mode == Pmode && GET_MODE (op) == ptr_mode
1479 && (CONSTANT_P (op)
1480 || (GET_CODE (op) == SUBREG
1481 && REG_P (SUBREG_REG (op))
1482 && REG_POINTER (SUBREG_REG (op))
1483 && GET_MODE (SUBREG_REG (op)) == Pmode))
1484 && !targetm.have_ptr_extend ())
1485 return convert_memory_address (Pmode, op);
1486 #endif
1487 break;
1489 case ZERO_EXTEND:
1490 /* Check for a zero extension of a subreg of a promoted
1491 variable, where the promotion is zero-extended, and the
1492 target mode is the same as the variable's promotion. */
1493 if (GET_CODE (op) == SUBREG
1494 && SUBREG_PROMOTED_VAR_P (op)
1495 && SUBREG_PROMOTED_UNSIGNED_P (op)
1496 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1498 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1499 if (temp)
1500 return temp;
1503 /* Extending a widening multiplication should be canonicalized to
1504 a wider widening multiplication. */
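/* E.g. (zero_extend:DI (mult:SI (zero_extend:SI (reg:QI))
   (zero_extend:SI (reg:QI)))) becomes the corresponding DImode widening
   multiply of the QImode operands.  */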
1505 if (GET_CODE (op) == MULT)
1507 rtx lhs = XEXP (op, 0);
1508 rtx rhs = XEXP (op, 1);
1509 enum rtx_code lcode = GET_CODE (lhs);
1510 enum rtx_code rcode = GET_CODE (rhs);
1512 /* Widening multiplies usually extend both operands, but sometimes
1513 they use a shift to extract a portion of a register. */
1514 if ((lcode == ZERO_EXTEND
1515 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1516 && (rcode == ZERO_EXTEND
1517 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1519 machine_mode lmode = GET_MODE (lhs);
1520 machine_mode rmode = GET_MODE (rhs);
1521 int bits;
1523 if (lcode == LSHIFTRT)
1524 /* Number of bits not shifted off the end. */
1525 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1526 else /* lcode == ZERO_EXTEND */
1527 /* Size of inner mode. */
1528 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1530 if (rcode == LSHIFTRT)
1531 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1532 else /* rcode == ZERO_EXTEND */
1533 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1535 /* We can only widen multiplies if the result is mathematically
1536 equivalent. I.e. if overflow was impossible. */
1537 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1538 return simplify_gen_binary
1539 (MULT, mode,
1540 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1541 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1545 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1546 if (GET_CODE (op) == ZERO_EXTEND)
1547 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1548 GET_MODE (XEXP (op, 0)));
1550 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1551 is (zero_extend:M (subreg:O <X>)) if there is mode with
1552 GET_MODE_PRECISION (N) - I bits. */
1553 if (GET_CODE (op) == LSHIFTRT
1554 && GET_CODE (XEXP (op, 0)) == ASHIFT
1555 && CONST_INT_P (XEXP (op, 1))
1556 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1557 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1559 machine_mode tmode
1560 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1561 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1562 if (tmode != BLKmode)
1564 rtx inner =
1565 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1566 if (inner)
1567 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1571 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1572 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1573 of mode N. E.g.
1574 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1575 (and:SI (reg:SI) (const_int 63)). */
1576 if (GET_CODE (op) == SUBREG
1577 && GET_MODE_PRECISION (GET_MODE (op))
1578 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1579 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1580 <= HOST_BITS_PER_WIDE_INT
1581 && GET_MODE_PRECISION (mode)
1582 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1583 && subreg_lowpart_p (op)
1584 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1585 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1587 if (GET_MODE_PRECISION (mode)
1588 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1589 return SUBREG_REG (op);
1590 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1591 GET_MODE (SUBREG_REG (op)));
1594 #if defined(POINTERS_EXTEND_UNSIGNED)
1595 /* As we do not know which address space the pointer is referring to,
1596 we can do this only if the target does not support different pointer
1597 or address modes depending on the address space. */
1598 if (target_default_pointer_address_modes_p ()
1599 && POINTERS_EXTEND_UNSIGNED > 0
1600 && mode == Pmode && GET_MODE (op) == ptr_mode
1601 && (CONSTANT_P (op)
1602 || (GET_CODE (op) == SUBREG
1603 && REG_P (SUBREG_REG (op))
1604 && REG_POINTER (SUBREG_REG (op))
1605 && GET_MODE (SUBREG_REG (op)) == Pmode))
1606 && !targetm.have_ptr_extend ())
1607 return convert_memory_address (Pmode, op);
1608 #endif
1609 break;
1611 default:
1612 break;
1615 return 0;
1618 /* Try to compute the value of a unary operation CODE whose output mode is to
1619 be MODE with input operand OP whose mode was originally OP_MODE.
1620 Return zero if the value cannot be computed. */
1622 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1623 rtx op, machine_mode op_mode)
1625 unsigned int width = GET_MODE_PRECISION (mode);
1627 if (code == VEC_DUPLICATE)
1629 gcc_assert (VECTOR_MODE_P (mode));
1630 if (GET_MODE (op) != VOIDmode)
1632 if (!VECTOR_MODE_P (GET_MODE (op)))
1633 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1634 else
1635 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1636 (GET_MODE (op)));
1638 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1639 || GET_CODE (op) == CONST_VECTOR)
1641 int elt_size = GET_MODE_UNIT_SIZE (mode);
1642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1643 rtvec v = rtvec_alloc (n_elts);
1644 unsigned int i;
1646 if (GET_CODE (op) != CONST_VECTOR)
1647 for (i = 0; i < n_elts; i++)
1648 RTVEC_ELT (v, i) = op;
1649 else
1651 machine_mode inmode = GET_MODE (op);
1652 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1653 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1655 gcc_assert (in_n_elts < n_elts);
1656 gcc_assert ((n_elts % in_n_elts) == 0);
1657 for (i = 0; i < n_elts; i++)
1658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1660 return gen_rtx_CONST_VECTOR (mode, v);
1664 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1666 int elt_size = GET_MODE_UNIT_SIZE (mode);
1667 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1668 machine_mode opmode = GET_MODE (op);
1669 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1670 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1671 rtvec v = rtvec_alloc (n_elts);
1672 unsigned int i;
1674 gcc_assert (op_n_elts == n_elts);
1675 for (i = 0; i < n_elts; i++)
1677 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1678 CONST_VECTOR_ELT (op, i),
1679 GET_MODE_INNER (opmode));
1680 if (!x)
1681 return 0;
1682 RTVEC_ELT (v, i) = x;
1684 return gen_rtx_CONST_VECTOR (mode, v);
1687 /* The order of these tests is critical so that, for example, we don't
1688 check the wrong mode (input vs. output) for a conversion operation,
1689 such as FIX. At some point, this should be simplified. */
1691 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1693 REAL_VALUE_TYPE d;
1695 if (op_mode == VOIDmode)
1697 /* CONST_INT have VOIDmode as the mode. We assume that all
1698 the bits of the constant are significant, though, this is
1699 a dangerous assumption as many times CONST_INTs are
1700 created and used with garbage in the bits outside of the
1701 precision of the implied mode of the const_int. */
1702 op_mode = MAX_MODE_INT;
1705 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1707 /* Avoid the folding if flag_signaling_nans is on and
1708 operand is a signaling NaN. */
1709 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1710 return 0;
1712 d = real_value_truncate (mode, d);
1713 return const_double_from_real_value (d, mode);
1715 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1717 REAL_VALUE_TYPE d;
1719 if (op_mode == VOIDmode)
1721 /* CONST_INT have VOIDmode as the mode. We assume that all
1722 the bits of the constant are significant, though, this is
1723 a dangerous assumption as many times CONST_INTs are
1724 created and used with garbage in the bits outside of the
1725 precision of the implied mode of the const_int. */
1726 op_mode = MAX_MODE_INT;
1729 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1731 /* Avoid the folding if flag_signaling_nans is on and
1732 operand is a signaling NaN. */
1733 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1734 return 0;
1736 d = real_value_truncate (mode, d);
1737 return const_double_from_real_value (d, mode);
1740 if (CONST_SCALAR_INT_P (op) && width > 0)
1742 wide_int result;
1743 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1744 rtx_mode_t op0 = std::make_pair (op, imode);
1745 int int_value;
1747 #if TARGET_SUPPORTS_WIDE_INT == 0
1748 /* This assert keeps the simplification from producing a result
1749 that cannot be represented in a CONST_DOUBLE but a lot of
1750 upstream callers expect that this function never fails to
1751 simplify something and so you if you added this to the test
1752 above the code would die later anyway. If this assert
1753 happens, you just need to make the port support wide int. */
1754 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1755 #endif
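/* E.g. (popcount:SI (const_int 7)) folds to (const_int 3) below, and
   (bswap:SI (const_int 0x12345678)) folds to (const_int 0x78563412).  */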
1757 switch (code)
1759 case NOT:
1760 result = wi::bit_not (op0);
1761 break;
1763 case NEG:
1764 result = wi::neg (op0);
1765 break;
1767 case ABS:
1768 result = wi::abs (op0);
1769 break;
1771 case FFS:
1772 result = wi::shwi (wi::ffs (op0), mode);
1773 break;
1775 case CLZ:
1776 if (wi::ne_p (op0, 0))
1777 int_value = wi::clz (op0);
1778 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1779 int_value = GET_MODE_PRECISION (mode);
1780 result = wi::shwi (int_value, mode);
1781 break;
1783 case CLRSB:
1784 result = wi::shwi (wi::clrsb (op0), mode);
1785 break;
1787 case CTZ:
1788 if (wi::ne_p (op0, 0))
1789 int_value = wi::ctz (op0);
1790 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1791 int_value = GET_MODE_PRECISION (mode);
1792 result = wi::shwi (int_value, mode);
1793 break;
1795 case POPCOUNT:
1796 result = wi::shwi (wi::popcount (op0), mode);
1797 break;
1799 case PARITY:
1800 result = wi::shwi (wi::parity (op0), mode);
1801 break;
1803 case BSWAP:
1804 result = wide_int (op0).bswap ();
1805 break;
1807 case TRUNCATE:
1808 case ZERO_EXTEND:
1809 result = wide_int::from (op0, width, UNSIGNED);
1810 break;
1812 case SIGN_EXTEND:
1813 result = wide_int::from (op0, width, SIGNED);
1814 break;
1816 case SQRT:
1817 default:
1818 return 0;
1821 return immed_wide_int_const (result, mode);
1824 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1825 && SCALAR_FLOAT_MODE_P (mode)
1826 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1828 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1829 switch (code)
1831 case SQRT:
1832 return 0;
1833 case ABS:
1834 d = real_value_abs (&d);
1835 break;
1836 case NEG:
1837 d = real_value_negate (&d);
1838 break;
1839 case FLOAT_TRUNCATE:
1840 /* Don't perform the operation if flag_signaling_nans is on
1841 and the operand is a signaling NaN. */
1842 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1843 d = real_value_truncate (mode, d);
1844 break;
1845 case FLOAT_EXTEND:
1846 /* All this does is change the mode, unless the mode class
1847 changes, in which case a real conversion is needed. */
1848 /* Don't perform the operation if flag_signaling_nans is on
1849 and the operand is a signaling NaN. */
1850 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1851 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1852 real_convert (&d, mode, &d);
1853 break;
1854 case FIX:
1855 /* Don't perform the operation if flag_signaling_nans is on
1856 and the operand is a signaling NaN. */
1857 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1858 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1859 break;
1860 case NOT:
1862 long tmp[4];
1863 int i;
1865 real_to_target (tmp, &d, GET_MODE (op));
1866 for (i = 0; i < 4; i++)
1867 tmp[i] = ~tmp[i];
1868 real_from_target (&d, tmp, mode);
1869 break;
1871 default:
1872 gcc_unreachable ();
1874 return const_double_from_real_value (d, mode);
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1878 && GET_MODE_CLASS (mode) == MODE_INT
1879 && width > 0)
1881 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1882 operators are intentionally left unspecified (to ease implementation
1883 by target backends), for consistency, this routine implements the
1884 same semantics for constant folding as used by the middle-end. */
1886 /* This was formerly used only for non-IEEE float.
1887 eggert@twinsun.com says it is safe for IEEE also. */
1888 REAL_VALUE_TYPE t;
1889 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1890 wide_int wmax, wmin;
1891 /* This is part of the ABI of real_to_integer, but we check
1892 for NaN and out-of-range values before making this call. */
1893 bool fail;
1895 switch (code)
1897 case FIX:
1898 if (REAL_VALUE_ISNAN (*x))
1899 return const0_rtx;
1901 /* Test against the signed upper bound. */
1902 wmax = wi::max_value (width, SIGNED);
1903 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1904 if (real_less (&t, x))
1905 return immed_wide_int_const (wmax, mode);
1907 /* Test against the signed lower bound. */
1908 wmin = wi::min_value (width, SIGNED);
1909 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1910 if (real_less (x, &t))
1911 return immed_wide_int_const (wmin, mode);
1913 return immed_wide_int_const (real_to_integer (x, &fail, width),
1914 mode);
1916 case UNSIGNED_FIX:
1917 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1918 return const0_rtx;
1920 /* Test against the unsigned upper bound. */
1921 wmax = wi::max_value (width, UNSIGNED);
1922 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1923 if (real_less (&t, x))
1924 return immed_wide_int_const (wmax, mode);
1926 return immed_wide_int_const (real_to_integer (x, &fail, width),
1927 mode);
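/* As an illustration of the saturation above: folding (fix:SI 3.0e9)
   yields 0x7fffffff, the largest SImode value, while folding
   (unsigned_fix:SI -1.5) yields 0. */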
1929 default:
1930 gcc_unreachable ();
1934 return NULL_RTX;
1937 /* Subroutine of simplify_binary_operation to simplify a binary operation
1938 CODE that can commute with byte swapping, with result mode MODE and
1939 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1940 Return zero if no simplification or canonicalization is possible. */
1942 static rtx
1943 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1946 rtx tem;
1948 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1949 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1951 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1952 simplify_gen_unary (BSWAP, mode, op1, mode));
1953 return simplify_gen_unary (BSWAP, mode, tem, mode);
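/* For instance, in SImode (and (bswap X) (const_int 0xff)) becomes
   (bswap (and X (const_int 0xff000000))): the low byte of the swapped
   value is the high byte of X. */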
1956 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1957 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1959 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1960 return simplify_gen_unary (BSWAP, mode, tem, mode);
1963 return NULL_RTX;
1966 /* Subroutine of simplify_binary_operation to simplify a commutative,
1967 associative binary operation CODE with result mode MODE, operating
1968 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1969 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1970 canonicalization is possible. */
1972 static rtx
1973 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1974 rtx op0, rtx op1)
1976 rtx tem;
1978 /* Linearize the operator to the left. */
1979 if (GET_CODE (op1) == code)
1981 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1982 if (GET_CODE (op0) == code)
1984 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1985 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1988 /* "a op (b op c)" becomes "(b op c) op a". */
1989 if (! swap_commutative_operands_p (op1, op0))
1990 return simplify_gen_binary (code, mode, op1, op0);
1992 std::swap (op0, op1);
1995 if (GET_CODE (op0) == code)
1997 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1998 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2000 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2001 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2004 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2005 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2006 if (tem != 0)
2007 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2009 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2010 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2011 if (tem != 0)
2012 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2015 return 0;
2019 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2020 and OP1. Return 0 if no simplification is possible.
2022 Don't use this for relational operations such as EQ or LT.
2023 Use simplify_relational_operation instead. */
2024 rtx
2025 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2026 rtx op0, rtx op1)
2028 rtx trueop0, trueop1;
2029 rtx tem;
2031 /* Relational operations don't work here. We must know the mode
2032 of the operands in order to do the comparison correctly.
2033 Assuming a full word can give incorrect results.
2034 Consider comparing 128 with -128 in QImode. */
2035 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2036 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2038 /* Make sure the constant is second. */
2039 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2040 && swap_commutative_operands_p (op0, op1))
2041 std::swap (op0, op1);
2043 trueop0 = avoid_constant_pool_reference (op0);
2044 trueop1 = avoid_constant_pool_reference (op1);
2046 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2047 if (tem)
2048 return tem;
2049 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2051 if (tem)
2052 return tem;
2054 /* If the above steps did not result in a simplification and op0 or op1
2055 were constant pool references, use the referenced constants directly. */
2056 if (trueop0 != op0 || trueop1 != op1)
2057 return simplify_gen_binary (code, mode, trueop0, trueop1);
2059 return NULL_RTX;
2062 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2063 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2064 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2065 actual constants. */
2067 static rtx
2068 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2069 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2071 rtx tem, reversed, opleft, opright;
2072 HOST_WIDE_INT val;
2073 unsigned int width = GET_MODE_PRECISION (mode);
2075 /* Even if we can't compute a constant result,
2076 there are some cases worth simplifying. */
2078 switch (code)
2080 case PLUS:
2081 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2082 when x is NaN, infinite, or finite and nonzero. They aren't
2083 when x is -0 and the rounding mode is not towards -infinity,
2084 since (-0) + 0 is then 0. */
2085 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2086 return op0;
2088 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2089 transformations are safe even for IEEE. */
2090 if (GET_CODE (op0) == NEG)
2091 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2092 else if (GET_CODE (op1) == NEG)
2093 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2095 /* (~a) + 1 -> -a */
2096 if (INTEGRAL_MODE_P (mode)
2097 && GET_CODE (op0) == NOT
2098 && trueop1 == const1_rtx)
2099 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2101 /* Handle both-operands-constant cases. We can only add
2102 CONST_INTs to constants since the sum of relocatable symbols
2103 can't be handled by most assemblers. Don't add CONST_INT
2104 to CONST_INT since overflow won't be computed properly if wider
2105 than HOST_BITS_PER_WIDE_INT. */
2107 if ((GET_CODE (op0) == CONST
2108 || GET_CODE (op0) == SYMBOL_REF
2109 || GET_CODE (op0) == LABEL_REF)
2110 && CONST_INT_P (op1))
2111 return plus_constant (mode, op0, INTVAL (op1));
2112 else if ((GET_CODE (op1) == CONST
2113 || GET_CODE (op1) == SYMBOL_REF
2114 || GET_CODE (op1) == LABEL_REF)
2115 && CONST_INT_P (op0))
2116 return plus_constant (mode, op1, INTVAL (op0));
2118 /* See if this is something like X * C - X or vice versa or
2119 if the multiplication is written as a shift. If so, we can
2120 distribute and make a new multiply, shift, or maybe just
2121 have X (if C is 2 in the example above). But don't make
2122 something more expensive than we had before. */
2124 if (SCALAR_INT_MODE_P (mode))
2126 rtx lhs = op0, rhs = op1;
2128 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2129 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2131 if (GET_CODE (lhs) == NEG)
2133 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2134 lhs = XEXP (lhs, 0);
2136 else if (GET_CODE (lhs) == MULT
2137 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2139 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2140 lhs = XEXP (lhs, 0);
2142 else if (GET_CODE (lhs) == ASHIFT
2143 && CONST_INT_P (XEXP (lhs, 1))
2144 && INTVAL (XEXP (lhs, 1)) >= 0
2145 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2147 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2148 GET_MODE_PRECISION (mode));
2149 lhs = XEXP (lhs, 0);
2152 if (GET_CODE (rhs) == NEG)
2154 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2155 rhs = XEXP (rhs, 0);
2157 else if (GET_CODE (rhs) == MULT
2158 && CONST_INT_P (XEXP (rhs, 1)))
2160 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2161 rhs = XEXP (rhs, 0);
2163 else if (GET_CODE (rhs) == ASHIFT
2164 && CONST_INT_P (XEXP (rhs, 1))
2165 && INTVAL (XEXP (rhs, 1)) >= 0
2166 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2168 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2169 GET_MODE_PRECISION (mode));
2170 rhs = XEXP (rhs, 0);
2173 if (rtx_equal_p (lhs, rhs))
2175 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2176 rtx coeff;
2177 bool speed = optimize_function_for_speed_p (cfun);
2179 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2181 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2182 return (set_src_cost (tem, mode, speed)
2183 <= set_src_cost (orig, mode, speed) ? tem : 0);
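/* For example, (plus (mult X (const_int 3)) X) gives coeff0 == 3 and
   coeff1 == 1, so it becomes (mult X (const_int 4)) provided the new
   form is no more expensive than the original. */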
2187 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2188 if (CONST_SCALAR_INT_P (op1)
2189 && GET_CODE (op0) == XOR
2190 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2191 && mode_signbit_p (mode, op1))
2192 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2193 simplify_gen_binary (XOR, mode, op1,
2194 XEXP (op0, 1)));
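/* E.g. in QImode, (plus (xor X (const_int 0x60)) (const_int 0x80))
   folds to (xor X (const_int 0xe0)): adding the sign bit toggles it,
   exactly as XORing it in does. */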
2196 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2197 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2198 && GET_CODE (op0) == MULT
2199 && GET_CODE (XEXP (op0, 0)) == NEG)
2201 rtx in1, in2;
2203 in1 = XEXP (XEXP (op0, 0), 0);
2204 in2 = XEXP (op0, 1);
2205 return simplify_gen_binary (MINUS, mode, op1,
2206 simplify_gen_binary (MULT, mode,
2207 in1, in2));
2210 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2211 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2212 is 1. */
2213 if (COMPARISON_P (op0)
2214 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2215 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2216 && (reversed = reversed_comparison (op0, mode)))
2217 return
2218 simplify_gen_unary (NEG, mode, reversed, mode);
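/* With STORE_FLAG_VALUE == 1, for instance, (plus (eq A B) (const_int -1))
   becomes (neg (ne A B)): both are 0 when A equals B and -1 otherwise. */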
2220 /* If one of the operands is a PLUS or a MINUS, see if we can
2221 simplify this by the associative law.
2222 Don't use the associative law for floating point.
2223 The inaccuracy makes it nonassociative,
2224 and subtle programs can break if operations are associated. */
2226 if (INTEGRAL_MODE_P (mode)
2227 && (plus_minus_operand_p (op0)
2228 || plus_minus_operand_p (op1))
2229 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2230 return tem;
2232 /* Reassociate floating point addition only when the user
2233 specifies associative math operations. */
2234 if (FLOAT_MODE_P (mode)
2235 && flag_associative_math)
2237 tem = simplify_associative_operation (code, mode, op0, op1);
2238 if (tem)
2239 return tem;
2241 break;
2243 case COMPARE:
2244 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2245 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2246 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2247 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2249 rtx xop00 = XEXP (op0, 0);
2250 rtx xop10 = XEXP (op1, 0);
2252 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2253 return xop00;
2255 if (REG_P (xop00) && REG_P (xop10)
2256 && GET_MODE (xop00) == GET_MODE (xop10)
2257 && REGNO (xop00) == REGNO (xop10)
2258 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2259 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2260 return xop00;
2262 break;
2264 case MINUS:
2265 /* We can't assume x-x is 0 even with non-IEEE floating point,
2266 but since it is zero except in very strange circumstances, we
2267 will treat it as zero with -ffinite-math-only. */
2268 if (rtx_equal_p (trueop0, trueop1)
2269 && ! side_effects_p (op0)
2270 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2271 return CONST0_RTX (mode);
2273 /* Change subtraction from zero into negation. (0 - x) is the
2274 same as -x when x is NaN, infinite, or finite and nonzero.
2275 But if the mode has signed zeros, and does not round towards
2276 -infinity, then 0 - 0 is 0, not -0. */
2277 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2278 return simplify_gen_unary (NEG, mode, op1, mode);
2280 /* (-1 - a) is ~a. */
2281 if (trueop0 == constm1_rtx)
2282 return simplify_gen_unary (NOT, mode, op1, mode);
2284 /* Subtracting 0 has no effect unless the mode has signed zeros
2285 and supports rounding towards -infinity. In such a case,
2286 0 - 0 is -0. */
2287 if (!(HONOR_SIGNED_ZEROS (mode)
2288 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2289 && trueop1 == CONST0_RTX (mode))
2290 return op0;
2292 /* See if this is something like X * C - X or vice versa or
2293 if the multiplication is written as a shift. If so, we can
2294 distribute and make a new multiply, shift, or maybe just
2295 have X (if C is 2 in the example above). But don't make
2296 something more expensive than we had before. */
2298 if (SCALAR_INT_MODE_P (mode))
2300 rtx lhs = op0, rhs = op1;
2302 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2303 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2305 if (GET_CODE (lhs) == NEG)
2307 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2308 lhs = XEXP (lhs, 0);
2310 else if (GET_CODE (lhs) == MULT
2311 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2313 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2314 lhs = XEXP (lhs, 0);
2316 else if (GET_CODE (lhs) == ASHIFT
2317 && CONST_INT_P (XEXP (lhs, 1))
2318 && INTVAL (XEXP (lhs, 1)) >= 0
2319 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2321 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2322 GET_MODE_PRECISION (mode));
2323 lhs = XEXP (lhs, 0);
2326 if (GET_CODE (rhs) == NEG)
2328 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2329 rhs = XEXP (rhs, 0);
2331 else if (GET_CODE (rhs) == MULT
2332 && CONST_INT_P (XEXP (rhs, 1)))
2334 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2335 rhs = XEXP (rhs, 0);
2337 else if (GET_CODE (rhs) == ASHIFT
2338 && CONST_INT_P (XEXP (rhs, 1))
2339 && INTVAL (XEXP (rhs, 1)) >= 0
2340 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2342 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2343 GET_MODE_PRECISION (mode));
2344 negcoeff1 = -negcoeff1;
2345 rhs = XEXP (rhs, 0);
2348 if (rtx_equal_p (lhs, rhs))
2350 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2351 rtx coeff;
2352 bool speed = optimize_function_for_speed_p (cfun);
2354 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2356 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2357 return (set_src_cost (tem, mode, speed)
2358 <= set_src_cost (orig, mode, speed) ? tem : 0);
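/* For example, (minus (mult X (const_int 3)) X) gives coeff0 == 3 and
   negcoeff1 == -1, so it becomes (mult X (const_int 2)) when that is
   no more expensive than the original. */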
2362 /* (a - (-b)) -> (a + b). True even for IEEE. */
2363 if (GET_CODE (op1) == NEG)
2364 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2366 /* (-x - c) may be simplified as (-c - x). */
2367 if (GET_CODE (op0) == NEG
2368 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2370 tem = simplify_unary_operation (NEG, mode, op1, mode);
2371 if (tem)
2372 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2375 /* Don't let a relocatable value get a negative coeff. */
2376 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2377 return simplify_gen_binary (PLUS, mode,
2378 op0,
2379 neg_const_int (mode, op1));
2381 /* (x - (x & y)) -> (x & ~y) */
2382 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2384 if (rtx_equal_p (op0, XEXP (op1, 0)))
2386 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2387 GET_MODE (XEXP (op1, 1)));
2388 return simplify_gen_binary (AND, mode, op0, tem);
2390 if (rtx_equal_p (op0, XEXP (op1, 1)))
2392 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2393 GET_MODE (XEXP (op1, 0)));
2394 return simplify_gen_binary (AND, mode, op0, tem);
2398 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2399 by reversing the comparison code if valid. */
2400 if (STORE_FLAG_VALUE == 1
2401 && trueop0 == const1_rtx
2402 && COMPARISON_P (op1)
2403 && (reversed = reversed_comparison (op1, mode)))
2404 return reversed;
2406 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2407 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2408 && GET_CODE (op1) == MULT
2409 && GET_CODE (XEXP (op1, 0)) == NEG)
2411 rtx in1, in2;
2413 in1 = XEXP (XEXP (op1, 0), 0);
2414 in2 = XEXP (op1, 1);
2415 return simplify_gen_binary (PLUS, mode,
2416 simplify_gen_binary (MULT, mode,
2417 in1, in2),
2418 op0);
2421 /* Canonicalize (minus (neg A) (mult B C)) to
2422 (minus (mult (neg B) C) A). */
2423 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2424 && GET_CODE (op1) == MULT
2425 && GET_CODE (op0) == NEG)
2427 rtx in1, in2;
2429 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2430 in2 = XEXP (op1, 1);
2431 return simplify_gen_binary (MINUS, mode,
2432 simplify_gen_binary (MULT, mode,
2433 in1, in2),
2434 XEXP (op0, 0));
2437 /* If one of the operands is a PLUS or a MINUS, see if we can
2438 simplify this by the associative law. This will, for example,
2439 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2440 Don't use the associative law for floating point.
2441 The inaccuracy makes it nonassociative,
2442 and subtle programs can break if operations are associated. */
2444 if (INTEGRAL_MODE_P (mode)
2445 && (plus_minus_operand_p (op0)
2446 || plus_minus_operand_p (op1))
2447 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2448 return tem;
2449 break;
2451 case MULT:
2452 if (trueop1 == constm1_rtx)
2453 return simplify_gen_unary (NEG, mode, op0, mode);
2455 if (GET_CODE (op0) == NEG)
2457 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2458 /* If op1 is a MULT as well and simplify_unary_operation
2459 just moved the NEG to the second operand, simplify_gen_binary
2460 below could, through simplify_associative_operation, move
2461 the NEG around again and recurse endlessly. */
2462 if (temp
2463 && GET_CODE (op1) == MULT
2464 && GET_CODE (temp) == MULT
2465 && XEXP (op1, 0) == XEXP (temp, 0)
2466 && GET_CODE (XEXP (temp, 1)) == NEG
2467 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2468 temp = NULL_RTX;
2469 if (temp)
2470 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2472 if (GET_CODE (op1) == NEG)
2474 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2475 /* If op0 is a MULT as well and simplify_unary_operation
2476 just moved the NEG to the second operand, simplify_gen_binary
2477 below could, through simplify_associative_operation, move
2478 the NEG around again and recurse endlessly. */
2479 if (temp
2480 && GET_CODE (op0) == MULT
2481 && GET_CODE (temp) == MULT
2482 && XEXP (op0, 0) == XEXP (temp, 0)
2483 && GET_CODE (XEXP (temp, 1)) == NEG
2484 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2485 temp = NULL_RTX;
2486 if (temp)
2487 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2490 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2491 x is NaN, since x * 0 is then also NaN. Nor is it valid
2492 when the mode has signed zeros, since multiplying a negative
2493 number by 0 will give -0, not 0. */
2494 if (!HONOR_NANS (mode)
2495 && !HONOR_SIGNED_ZEROS (mode)
2496 && trueop1 == CONST0_RTX (mode)
2497 && ! side_effects_p (op0))
2498 return op1;
2500 /* In IEEE floating point, x*1 is not equivalent to x for
2501 signaling NaNs. */
2502 if (!HONOR_SNANS (mode)
2503 && trueop1 == CONST1_RTX (mode))
2504 return op0;
2506 /* Convert multiply by constant power of two into shift. */
2507 if (CONST_SCALAR_INT_P (trueop1))
2509 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2510 if (val >= 0)
2511 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2514 /* x*2 is x+x and x*(-1) is -x */
2515 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2516 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2517 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2518 && GET_MODE (op0) == mode)
2520 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2522 if (real_equal (d1, &dconst2))
2523 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2525 if (!HONOR_SNANS (mode)
2526 && real_equal (d1, &dconstm1))
2527 return simplify_gen_unary (NEG, mode, op0, mode);
2530 /* Optimize -x * -x as x * x. */
2531 if (FLOAT_MODE_P (mode)
2532 && GET_CODE (op0) == NEG
2533 && GET_CODE (op1) == NEG
2534 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2535 && !side_effects_p (XEXP (op0, 0)))
2536 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2538 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2539 if (SCALAR_FLOAT_MODE_P (mode)
2540 && GET_CODE (op0) == ABS
2541 && GET_CODE (op1) == ABS
2542 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2543 && !side_effects_p (XEXP (op0, 0)))
2544 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2546 /* Reassociate multiplication, but for floating point MULTs
2547 only when the user specifies unsafe math optimizations. */
2548 if (! FLOAT_MODE_P (mode)
2549 || flag_unsafe_math_optimizations)
2551 tem = simplify_associative_operation (code, mode, op0, op1);
2552 if (tem)
2553 return tem;
2555 break;
2557 case IOR:
2558 if (trueop1 == CONST0_RTX (mode))
2559 return op0;
2560 if (INTEGRAL_MODE_P (mode)
2561 && trueop1 == CONSTM1_RTX (mode)
2562 && !side_effects_p (op0))
2563 return op1;
2564 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2565 return op0;
2566 /* A | (~A) -> -1 */
2567 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2568 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2569 && ! side_effects_p (op0)
2570 && SCALAR_INT_MODE_P (mode))
2571 return constm1_rtx;
2573 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2574 if (CONST_INT_P (op1)
2575 && HWI_COMPUTABLE_MODE_P (mode)
2576 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2577 && !side_effects_p (op0))
2578 return op1;
2580 /* Canonicalize (X & C1) | C2. */
2581 if (GET_CODE (op0) == AND
2582 && CONST_INT_P (trueop1)
2583 && CONST_INT_P (XEXP (op0, 1)))
2585 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2586 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2587 HOST_WIDE_INT c2 = INTVAL (trueop1);
2589 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2590 if ((c1 & c2) == c1
2591 && !side_effects_p (XEXP (op0, 0)))
2592 return trueop1;
2594 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2595 if (((c1|c2) & mask) == mask)
2596 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2598 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2599 if (((c1 & ~c2) & mask) != (c1 & mask))
2601 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2602 gen_int_mode (c1 & ~c2, mode));
2603 return simplify_gen_binary (IOR, mode, tem, op1);
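/* As an example of the last rule: (ior (and X (const_int 0x0f))
   (const_int 0x06)) becomes (ior (and X (const_int 0x09))
   (const_int 0x06)), since 0x0f & ~0x06 == 0x09. */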
2607 /* Convert (A & B) | A to A. */
2608 if (GET_CODE (op0) == AND
2609 && (rtx_equal_p (XEXP (op0, 0), op1)
2610 || rtx_equal_p (XEXP (op0, 1), op1))
2611 && ! side_effects_p (XEXP (op0, 0))
2612 && ! side_effects_p (XEXP (op0, 1)))
2613 return op1;
2615 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2616 mode size to (rotate A CX). */
2618 if (GET_CODE (op1) == ASHIFT
2619 || GET_CODE (op1) == SUBREG)
2621 opleft = op1;
2622 opright = op0;
2624 else
2626 opright = op1;
2627 opleft = op0;
2630 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2631 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2632 && CONST_INT_P (XEXP (opleft, 1))
2633 && CONST_INT_P (XEXP (opright, 1))
2634 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2635 == GET_MODE_PRECISION (mode)))
2636 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
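/* E.g. in SImode, (ior (ashift X (const_int 24)) (lshiftrt X (const_int 8)))
   becomes (rotate X (const_int 24)). */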
2638 /* Same, but for ashift that has been "simplified" to a wider mode
2639 by simplify_shift_const. */
2641 if (GET_CODE (opleft) == SUBREG
2642 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2643 && GET_CODE (opright) == LSHIFTRT
2644 && GET_CODE (XEXP (opright, 0)) == SUBREG
2645 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2646 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2647 && (GET_MODE_SIZE (GET_MODE (opleft))
2648 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2649 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2650 SUBREG_REG (XEXP (opright, 0)))
2651 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2652 && CONST_INT_P (XEXP (opright, 1))
2653 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2654 == GET_MODE_PRECISION (mode)))
2655 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2656 XEXP (SUBREG_REG (opleft), 1));
2658 /* If we have (ior (and X C1) C2), simplify this by making
2659 C1 as small as possible if C1 actually changes. */
2660 if (CONST_INT_P (op1)
2661 && (HWI_COMPUTABLE_MODE_P (mode)
2662 || INTVAL (op1) > 0)
2663 && GET_CODE (op0) == AND
2664 && CONST_INT_P (XEXP (op0, 1))
2665 && CONST_INT_P (op1)
2666 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2668 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2669 gen_int_mode (UINTVAL (XEXP (op0, 1))
2670 & ~UINTVAL (op1),
2671 mode));
2672 return simplify_gen_binary (IOR, mode, tmp, op1);
2675 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2676 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2677 the PLUS does not affect any of the bits in OP1, we can do
2678 the IOR as a PLUS and we can associate. This is valid if OP1
2679 can be safely shifted left C bits. */
2680 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2681 && GET_CODE (XEXP (op0, 0)) == PLUS
2682 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2683 && CONST_INT_P (XEXP (op0, 1))
2684 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2686 int count = INTVAL (XEXP (op0, 1));
2687 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2689 if (mask >> count == INTVAL (trueop1)
2690 && trunc_int_for_mode (mask, mode) == mask
2691 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2692 return simplify_gen_binary (ASHIFTRT, mode,
2693 plus_constant (mode, XEXP (op0, 0),
2694 mask),
2695 XEXP (op0, 1));
2698 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2699 if (tem)
2700 return tem;
2702 tem = simplify_associative_operation (code, mode, op0, op1);
2703 if (tem)
2704 return tem;
2705 break;
2707 case XOR:
2708 if (trueop1 == CONST0_RTX (mode))
2709 return op0;
2710 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2711 return simplify_gen_unary (NOT, mode, op0, mode);
2712 if (rtx_equal_p (trueop0, trueop1)
2713 && ! side_effects_p (op0)
2714 && GET_MODE_CLASS (mode) != MODE_CC)
2715 return CONST0_RTX (mode);
2717 /* Canonicalize XOR of the most significant bit to PLUS. */
2718 if (CONST_SCALAR_INT_P (op1)
2719 && mode_signbit_p (mode, op1))
2720 return simplify_gen_binary (PLUS, mode, op0, op1);
2721 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2722 if (CONST_SCALAR_INT_P (op1)
2723 && GET_CODE (op0) == PLUS
2724 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2725 && mode_signbit_p (mode, XEXP (op0, 1)))
2726 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2727 simplify_gen_binary (XOR, mode, op1,
2728 XEXP (op0, 1)));
2730 /* If we are XORing two things that have no bits in common,
2731 convert them into an IOR. This helps to detect rotation encoded
2732 using those methods and possibly other simplifications. */
2734 if (HWI_COMPUTABLE_MODE_P (mode)
2735 && (nonzero_bits (op0, mode)
2736 & nonzero_bits (op1, mode)) == 0)
2737 return (simplify_gen_binary (IOR, mode, op0, op1));
2739 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2740 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2741 (NOT y). */
2743 int num_negated = 0;
2745 if (GET_CODE (op0) == NOT)
2746 num_negated++, op0 = XEXP (op0, 0);
2747 if (GET_CODE (op1) == NOT)
2748 num_negated++, op1 = XEXP (op1, 0);
2750 if (num_negated == 2)
2751 return simplify_gen_binary (XOR, mode, op0, op1);
2752 else if (num_negated == 1)
2753 return simplify_gen_unary (NOT, mode,
2754 simplify_gen_binary (XOR, mode, op0, op1),
2755 mode);
2758 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2759 correspond to a machine insn or result in further simplifications
2760 if B is a constant. */
2762 if (GET_CODE (op0) == AND
2763 && rtx_equal_p (XEXP (op0, 1), op1)
2764 && ! side_effects_p (op1))
2765 return simplify_gen_binary (AND, mode,
2766 simplify_gen_unary (NOT, mode,
2767 XEXP (op0, 0), mode),
2768 op1);
2770 else if (GET_CODE (op0) == AND
2771 && rtx_equal_p (XEXP (op0, 0), op1)
2772 && ! side_effects_p (op1))
2773 return simplify_gen_binary (AND, mode,
2774 simplify_gen_unary (NOT, mode,
2775 XEXP (op0, 1), mode),
2776 op1);
2778 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2779 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2780 out bits inverted twice and not set by C. Similarly, given
2781 (xor (and (xor A B) C) D), simplify without inverting C in
2782 the xor operand: (xor (and A C) (B&C)^D). */
2784 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2785 && GET_CODE (XEXP (op0, 0)) == XOR
2786 && CONST_INT_P (op1)
2787 && CONST_INT_P (XEXP (op0, 1))
2788 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2790 enum rtx_code op = GET_CODE (op0);
2791 rtx a = XEXP (XEXP (op0, 0), 0);
2792 rtx b = XEXP (XEXP (op0, 0), 1);
2793 rtx c = XEXP (op0, 1);
2794 rtx d = op1;
2795 HOST_WIDE_INT bval = INTVAL (b);
2796 HOST_WIDE_INT cval = INTVAL (c);
2797 HOST_WIDE_INT dval = INTVAL (d);
2798 HOST_WIDE_INT xcval;
2800 if (op == IOR)
2801 xcval = ~cval;
2802 else
2803 xcval = cval;
2805 return simplify_gen_binary (XOR, mode,
2806 simplify_gen_binary (op, mode, a, c),
2807 gen_int_mode ((bval & xcval) ^ dval,
2808 mode));
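/* For instance, (xor (ior (xor A (const_int 0x0f)) (const_int 0xf0))
   (const_int 0x33)) becomes (xor (ior A (const_int 0xf0)) (const_int 0x3c)),
   since (0x0f & ~0xf0) ^ 0x33 == 0x3c. */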
2811 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2812 we can transform like this:
2813 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2814 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2815 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2816 Attempt a few simplifications when B and C are both constants. */
2817 if (GET_CODE (op0) == AND
2818 && CONST_INT_P (op1)
2819 && CONST_INT_P (XEXP (op0, 1)))
2821 rtx a = XEXP (op0, 0);
2822 rtx b = XEXP (op0, 1);
2823 rtx c = op1;
2824 HOST_WIDE_INT bval = INTVAL (b);
2825 HOST_WIDE_INT cval = INTVAL (c);
2827 /* Instead of computing ~A&C, we compute its negated value,
2828 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2829 optimize for sure. If it does not simplify, we still try
2830 to compute ~A&C below, but since that always allocates
2831 RTL, we don't try that before committing to returning a
2832 simplified expression. */
2833 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2834 GEN_INT (~cval));
2836 if ((~cval & bval) == 0)
2838 rtx na_c = NULL_RTX;
2839 if (n_na_c)
2840 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2841 else
2843 /* If ~A does not simplify, don't bother: we don't
2844 want to simplify 2 operations into 3, and if na_c
2845 were to simplify with na, n_na_c would have
2846 simplified as well. */
2847 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2848 if (na)
2849 na_c = simplify_gen_binary (AND, mode, na, c);
2852 /* Try to simplify ~A&C | ~B&C. */
2853 if (na_c != NULL_RTX)
2854 return simplify_gen_binary (IOR, mode, na_c,
2855 gen_int_mode (~bval & cval, mode));
2857 else
2859 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2860 if (n_na_c == CONSTM1_RTX (mode))
2862 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2863 gen_int_mode (~cval & bval,
2864 mode));
2865 return simplify_gen_binary (IOR, mode, a_nc_b,
2866 gen_int_mode (~bval & cval,
2867 mode));
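/* As a concrete instance of the identity being used: with B == 0x03 and
   C == 0x0f, (A & 0x03) ^ 0x0f equals (~A & 0x0f) | 0x0c for any A. */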
2872 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2873 comparison if STORE_FLAG_VALUE is 1. */
2874 if (STORE_FLAG_VALUE == 1
2875 && trueop1 == const1_rtx
2876 && COMPARISON_P (op0)
2877 && (reversed = reversed_comparison (op0, mode)))
2878 return reversed;
2880 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2881 is (lt foo (const_int 0)), so we can perform the above
2882 simplification if STORE_FLAG_VALUE is 1. */
2884 if (STORE_FLAG_VALUE == 1
2885 && trueop1 == const1_rtx
2886 && GET_CODE (op0) == LSHIFTRT
2887 && CONST_INT_P (XEXP (op0, 1))
2888 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2889 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2891 /* (xor (comparison foo bar) (const_int sign-bit)) can likewise be
2892 reversed when STORE_FLAG_VALUE is the sign bit. */
2893 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2894 && trueop1 == const_true_rtx
2895 && COMPARISON_P (op0)
2896 && (reversed = reversed_comparison (op0, mode)))
2897 return reversed;
2899 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2900 if (tem)
2901 return tem;
2903 tem = simplify_associative_operation (code, mode, op0, op1);
2904 if (tem)
2905 return tem;
2906 break;
2908 case AND:
2909 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2910 return trueop1;
2911 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2912 return op0;
2913 if (HWI_COMPUTABLE_MODE_P (mode))
2915 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2916 HOST_WIDE_INT nzop1;
2917 if (CONST_INT_P (trueop1))
2919 HOST_WIDE_INT val1 = INTVAL (trueop1);
2920 /* If we are turning off bits already known off in OP0, we need
2921 not do an AND. */
2922 if ((nzop0 & ~val1) == 0)
2923 return op0;
2925 nzop1 = nonzero_bits (trueop1, mode);
2926 /* If we are clearing all the nonzero bits, the result is zero. */
2927 if ((nzop1 & nzop0) == 0
2928 && !side_effects_p (op0) && !side_effects_p (op1))
2929 return CONST0_RTX (mode);
2931 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2932 && GET_MODE_CLASS (mode) != MODE_CC)
2933 return op0;
2934 /* A & (~A) -> 0 */
2935 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2936 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2937 && ! side_effects_p (op0)
2938 && GET_MODE_CLASS (mode) != MODE_CC)
2939 return CONST0_RTX (mode);
2941 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2942 there are no nonzero bits of C outside of X's mode. */
2943 if ((GET_CODE (op0) == SIGN_EXTEND
2944 || GET_CODE (op0) == ZERO_EXTEND)
2945 && CONST_INT_P (trueop1)
2946 && HWI_COMPUTABLE_MODE_P (mode)
2947 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2948 & UINTVAL (trueop1)) == 0)
2950 machine_mode imode = GET_MODE (XEXP (op0, 0));
2951 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2952 gen_int_mode (INTVAL (trueop1),
2953 imode));
2954 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2957 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2958 we might be able to further simplify the AND with X and potentially
2959 remove the truncation altogether. */
2960 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2962 rtx x = XEXP (op0, 0);
2963 machine_mode xmode = GET_MODE (x);
2964 tem = simplify_gen_binary (AND, xmode, x,
2965 gen_int_mode (INTVAL (trueop1), xmode));
2966 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2969 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2970 if (GET_CODE (op0) == IOR
2971 && CONST_INT_P (trueop1)
2972 && CONST_INT_P (XEXP (op0, 1)))
2974 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2975 return simplify_gen_binary (IOR, mode,
2976 simplify_gen_binary (AND, mode,
2977 XEXP (op0, 0), op1),
2978 gen_int_mode (tmp, mode));
2981 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2982 insn (and may simplify more). */
2983 if (GET_CODE (op0) == XOR
2984 && rtx_equal_p (XEXP (op0, 0), op1)
2985 && ! side_effects_p (op1))
2986 return simplify_gen_binary (AND, mode,
2987 simplify_gen_unary (NOT, mode,
2988 XEXP (op0, 1), mode),
2989 op1);
2991 if (GET_CODE (op0) == XOR
2992 && rtx_equal_p (XEXP (op0, 1), op1)
2993 && ! side_effects_p (op1))
2994 return simplify_gen_binary (AND, mode,
2995 simplify_gen_unary (NOT, mode,
2996 XEXP (op0, 0), mode),
2997 op1);
2999 /* Similarly for (~(A ^ B)) & A. */
3000 if (GET_CODE (op0) == NOT
3001 && GET_CODE (XEXP (op0, 0)) == XOR
3002 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3003 && ! side_effects_p (op1))
3004 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3006 if (GET_CODE (op0) == NOT
3007 && GET_CODE (XEXP (op0, 0)) == XOR
3008 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3009 && ! side_effects_p (op1))
3010 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3012 /* Convert (A | B) & A to A. */
3013 if (GET_CODE (op0) == IOR
3014 && (rtx_equal_p (XEXP (op0, 0), op1)
3015 || rtx_equal_p (XEXP (op0, 1), op1))
3016 && ! side_effects_p (XEXP (op0, 0))
3017 && ! side_effects_p (XEXP (op0, 1)))
3018 return op1;
3020 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3021 ((A & N) + B) & M -> (A + B) & M
3022 Similarly if (N & M) == 0,
3023 ((A | N) + B) & M -> (A + B) & M
3024 and for - instead of + and/or ^ instead of |.
3025 Also, if (N & M) == 0, then
3026 (A +- N) & M -> A & M. */
3027 if (CONST_INT_P (trueop1)
3028 && HWI_COMPUTABLE_MODE_P (mode)
3029 && ~UINTVAL (trueop1)
3030 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3031 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3033 rtx pmop[2];
3034 int which;
3036 pmop[0] = XEXP (op0, 0);
3037 pmop[1] = XEXP (op0, 1);
3039 if (CONST_INT_P (pmop[1])
3040 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3041 return simplify_gen_binary (AND, mode, pmop[0], op1);
3043 for (which = 0; which < 2; which++)
3045 tem = pmop[which];
3046 switch (GET_CODE (tem))
3048 case AND:
3049 if (CONST_INT_P (XEXP (tem, 1))
3050 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3051 == UINTVAL (trueop1))
3052 pmop[which] = XEXP (tem, 0);
3053 break;
3054 case IOR:
3055 case XOR:
3056 if (CONST_INT_P (XEXP (tem, 1))
3057 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3058 pmop[which] = XEXP (tem, 0);
3059 break;
3060 default:
3061 break;
3065 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3067 tem = simplify_gen_binary (GET_CODE (op0), mode,
3068 pmop[0], pmop[1]);
3069 return simplify_gen_binary (code, mode, tem, op1);
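/* E.g. with M == 0x0f and N == 0xff, (and (plus (and A (const_int 0xff)) B)
   (const_int 0x0f)) becomes (and (plus A B) (const_int 0x0f)): the bits of
   A masked off by the inner AND cannot affect the low four bits of the sum. */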
3073 /* (and X (ior (not X) Y)) -> (and X Y) */
3074 if (GET_CODE (op1) == IOR
3075 && GET_CODE (XEXP (op1, 0)) == NOT
3076 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3077 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3079 /* (and (ior (not X) Y) X) -> (and X Y) */
3080 if (GET_CODE (op0) == IOR
3081 && GET_CODE (XEXP (op0, 0)) == NOT
3082 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3083 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3085 /* (and X (ior Y (not X))) -> (and X Y) */
3086 if (GET_CODE (op1) == IOR
3087 && GET_CODE (XEXP (op1, 1)) == NOT
3088 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3089 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3091 /* (and (ior Y (not X)) X) -> (and X Y) */
3092 if (GET_CODE (op0) == IOR
3093 && GET_CODE (XEXP (op0, 1)) == NOT
3094 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3095 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3097 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3101 tem = simplify_associative_operation (code, mode, op0, op1);
3102 if (tem)
3103 return tem;
3104 break;
3106 case UDIV:
3107 /* 0/x is 0 (or x&0 if x has side-effects). */
3108 if (trueop0 == CONST0_RTX (mode))
3110 if (side_effects_p (op1))
3111 return simplify_gen_binary (AND, mode, op1, trueop0);
3112 return trueop0;
3114 /* x/1 is x. */
3115 if (trueop1 == CONST1_RTX (mode))
3117 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3118 if (tem)
3119 return tem;
3121 /* Convert divide by power of two into shift. */
3122 if (CONST_INT_P (trueop1)
3123 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3124 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3125 break;
3127 case DIV:
3128 /* Handle floating point and integers separately. */
3129 if (SCALAR_FLOAT_MODE_P (mode))
3131 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3132 safe for modes with NaNs, since 0.0 / 0.0 will then be
3133 NaN rather than 0.0. Nor is it safe for modes with signed
3134 zeros, since dividing 0 by a negative number gives -0.0 */
3135 if (trueop0 == CONST0_RTX (mode)
3136 && !HONOR_NANS (mode)
3137 && !HONOR_SIGNED_ZEROS (mode)
3138 && ! side_effects_p (op1))
3139 return op0;
3140 /* x/1.0 is x. */
3141 if (trueop1 == CONST1_RTX (mode)
3142 && !HONOR_SNANS (mode))
3143 return op0;
3145 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3146 && trueop1 != CONST0_RTX (mode))
3148 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3150 /* x/-1.0 is -x. */
3151 if (real_equal (d1, &dconstm1)
3152 && !HONOR_SNANS (mode))
3153 return simplify_gen_unary (NEG, mode, op0, mode);
3155 /* Change FP division by a constant into multiplication.
3156 Only do this with -freciprocal-math. */
3157 if (flag_reciprocal_math
3158 && !real_equal (d1, &dconst0))
3160 REAL_VALUE_TYPE d;
3161 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3162 tem = const_double_from_real_value (d, mode);
3163 return simplify_gen_binary (MULT, mode, op0, tem);
3167 else if (SCALAR_INT_MODE_P (mode))
3169 /* 0/x is 0 (or x&0 if x has side-effects). */
3170 if (trueop0 == CONST0_RTX (mode)
3171 && !cfun->can_throw_non_call_exceptions)
3173 if (side_effects_p (op1))
3174 return simplify_gen_binary (AND, mode, op1, trueop0);
3175 return trueop0;
3177 /* x/1 is x. */
3178 if (trueop1 == CONST1_RTX (mode))
3180 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3181 if (tem)
3182 return tem;
3184 /* x/-1 is -x. */
3185 if (trueop1 == constm1_rtx)
3187 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3188 if (x)
3189 return simplify_gen_unary (NEG, mode, x, mode);
3192 break;
3194 case UMOD:
3195 /* 0%x is 0 (or x&0 if x has side-effects). */
3196 if (trueop0 == CONST0_RTX (mode))
3198 if (side_effects_p (op1))
3199 return simplify_gen_binary (AND, mode, op1, trueop0);
3200 return trueop0;
3202 /* x%1 is 0 (or x&0 if x has side-effects). */
3203 if (trueop1 == CONST1_RTX (mode))
3205 if (side_effects_p (op0))
3206 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3207 return CONST0_RTX (mode);
3209 /* Implement modulus by power of two as AND. */
3210 if (CONST_INT_P (trueop1)
3211 && exact_log2 (UINTVAL (trueop1)) > 0)
3212 return simplify_gen_binary (AND, mode, op0,
3213 gen_int_mode (INTVAL (op1) - 1, mode));
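/* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)). */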
3214 break;
3216 case MOD:
3217 /* 0%x is 0 (or x&0 if x has side-effects). */
3218 if (trueop0 == CONST0_RTX (mode))
3220 if (side_effects_p (op1))
3221 return simplify_gen_binary (AND, mode, op1, trueop0);
3222 return trueop0;
3224 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3225 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3227 if (side_effects_p (op0))
3228 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3229 return CONST0_RTX (mode);
3231 break;
3233 case ROTATERT:
3234 case ROTATE:
3235 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3236 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3237 bitsize - 1, use the other direction of rotate with an amount of
3238 1 .. bitsize / 2 - 1 instead. */
3239 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3240 if (CONST_INT_P (trueop1)
3241 && IN_RANGE (INTVAL (trueop1),
3242 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3243 GET_MODE_PRECISION (mode) - 1))
3244 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3245 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3246 - INTVAL (trueop1)));
3247 #endif
3248 /* FALLTHRU */
3249 case ASHIFTRT:
3250 if (trueop1 == CONST0_RTX (mode))
3251 return op0;
3252 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3253 return op0;
3254 /* Rotating ~0 always results in ~0. */
3255 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3256 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3257 && ! side_effects_p (op1))
3258 return op0;
3259 /* Given:
3260 scalar modes M1, M2
3261 scalar constants c1, c2
3262 size (M2) > size (M1)
3263 c1 == size (M2) - size (M1)
3264 optimize:
3265 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3266 <low_part>)
3267 (const_int <c2>))
3268 to:
3269 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3270 <low_part>). */
3271 if (code == ASHIFTRT
3272 && !VECTOR_MODE_P (mode)
3273 && SUBREG_P (op0)
3274 && CONST_INT_P (op1)
3275 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3276 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3277 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3278 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3279 > GET_MODE_BITSIZE (mode))
3280 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3281 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3282 - GET_MODE_BITSIZE (mode)))
3283 && subreg_lowpart_p (op0))
3285 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3286 + INTVAL (op1));
3287 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3288 tmp = simplify_gen_binary (ASHIFTRT,
3289 GET_MODE (SUBREG_REG (op0)),
3290 XEXP (SUBREG_REG (op0), 0),
3291 tmp);
3292 return lowpart_subreg (mode, tmp, inner_mode);
3294 canonicalize_shift:
3295 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3297 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3298 if (val != INTVAL (op1))
3299 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3301 break;
3303 case ASHIFT:
3304 case SS_ASHIFT:
3305 case US_ASHIFT:
3306 if (trueop1 == CONST0_RTX (mode))
3307 return op0;
3308 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3309 return op0;
3310 goto canonicalize_shift;
3312 case LSHIFTRT:
3313 if (trueop1 == CONST0_RTX (mode))
3314 return op0;
3315 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3316 return op0;
3317 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3318 if (GET_CODE (op0) == CLZ
3319 && CONST_INT_P (trueop1)
3320 && STORE_FLAG_VALUE == 1
3321 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3323 machine_mode imode = GET_MODE (XEXP (op0, 0));
3324 unsigned HOST_WIDE_INT zero_val = 0;
3326 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3327 && zero_val == GET_MODE_PRECISION (imode)
3328 && INTVAL (trueop1) == exact_log2 (zero_val))
3329 return simplify_gen_relational (EQ, mode, imode,
3330 XEXP (op0, 0), const0_rtx);
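/* For example, if CLZ of zero is defined to be 32 in SImode, then
   (lshiftrt (clz:SI X) (const_int 5)) is 1 exactly when X is zero and 0
   otherwise, which is (eq X (const_int 0)) given STORE_FLAG_VALUE == 1. */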
3332 goto canonicalize_shift;
3334 case SMIN:
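/* (smin X C) is C when C is the most negative value of the mode
   (the sign bit alone), since no value compares smaller. */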
3335 if (width <= HOST_BITS_PER_WIDE_INT
3336 && mode_signbit_p (mode, trueop1)
3337 && ! side_effects_p (op0))
3338 return op1;
3339 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3340 return op0;
3341 tem = simplify_associative_operation (code, mode, op0, op1);
3342 if (tem)
3343 return tem;
3344 break;
3346 case SMAX:
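/* (smax X C) is C when C is the largest signed value of the mode
   (GET_MODE_MASK >> 1), since no value compares larger. */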
3347 if (width <= HOST_BITS_PER_WIDE_INT
3348 && CONST_INT_P (trueop1)
3349 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3350 && ! side_effects_p (op0))
3351 return op1;
3352 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3353 return op0;
3354 tem = simplify_associative_operation (code, mode, op0, op1);
3355 if (tem)
3356 return tem;
3357 break;
3359 case UMIN:
3360 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3361 return op1;
3362 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3363 return op0;
3364 tem = simplify_associative_operation (code, mode, op0, op1);
3365 if (tem)
3366 return tem;
3367 break;
3369 case UMAX:
3370 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3371 return op1;
3372 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3373 return op0;
3374 tem = simplify_associative_operation (code, mode, op0, op1);
3375 if (tem)
3376 return tem;
3377 break;
3379 case SS_PLUS:
3380 case US_PLUS:
3381 case SS_MINUS:
3382 case US_MINUS:
3383 case SS_MULT:
3384 case US_MULT:
3385 case SS_DIV:
3386 case US_DIV:
3387 /* ??? There are simplifications that can be done. */
3388 return 0;
3390 case VEC_SELECT:
3391 if (!VECTOR_MODE_P (mode))
3393 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3394 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3395 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3396 gcc_assert (XVECLEN (trueop1, 0) == 1);
3397 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3399 if (GET_CODE (trueop0) == CONST_VECTOR)
3400 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3401 (trueop1, 0, 0)));
3403 /* Extract a scalar element from a nested VEC_SELECT expression
3404 (with an optional nested VEC_CONCAT expression). Some targets
3405 (i386) extract a scalar element from a vector using a chain of
3406 nested VEC_SELECT expressions. When the input operand is a
3407 memory operand, this operation can be simplified to a simple
3408 scalar load from an offset memory address. */
3409 if (GET_CODE (trueop0) == VEC_SELECT)
3411 rtx op0 = XEXP (trueop0, 0);
3412 rtx op1 = XEXP (trueop0, 1);
3414 machine_mode opmode = GET_MODE (op0);
3415 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3416 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3418 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3419 int elem;
3421 rtvec vec;
3422 rtx tmp_op, tmp;
3424 gcc_assert (GET_CODE (op1) == PARALLEL);
3425 gcc_assert (i < n_elts);
3428 /* Select the element pointed to by the nested selector. */
3428 elem = INTVAL (XVECEXP (op1, 0, i));
3430 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3431 if (GET_CODE (op0) == VEC_CONCAT)
3433 rtx op00 = XEXP (op0, 0);
3434 rtx op01 = XEXP (op0, 1);
3436 machine_mode mode00, mode01;
3437 int n_elts00, n_elts01;
3439 mode00 = GET_MODE (op00);
3440 mode01 = GET_MODE (op01);
3442 /* Find out number of elements of each operand. */
3443 if (VECTOR_MODE_P (mode00))
3445 elt_size = GET_MODE_UNIT_SIZE (mode00);
3446 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3448 else
3449 n_elts00 = 1;
3451 if (VECTOR_MODE_P (mode01))
3453 elt_size = GET_MODE_UNIT_SIZE (mode01);
3454 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3456 else
3457 n_elts01 = 1;
3459 gcc_assert (n_elts == n_elts00 + n_elts01);
3461 /* Select the correct operand of the VEC_CONCAT
3462 and adjust the selector. */
3463 if (elem < n_elts01)
3464 tmp_op = op00;
3465 else
3467 tmp_op = op01;
3468 elem -= n_elts00;
3471 else
3472 tmp_op = op0;
3474 vec = rtvec_alloc (1);
3475 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3477 tmp = gen_rtx_fmt_ee (code, mode,
3478 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3479 return tmp;
3481 if (GET_CODE (trueop0) == VEC_DUPLICATE
3482 && GET_MODE (XEXP (trueop0, 0)) == mode)
3483 return XEXP (trueop0, 0);
3485 else
3487 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3488 gcc_assert (GET_MODE_INNER (mode)
3489 == GET_MODE_INNER (GET_MODE (trueop0)));
3490 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3492 if (GET_CODE (trueop0) == CONST_VECTOR)
3494 int elt_size = GET_MODE_UNIT_SIZE (mode);
3495 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3496 rtvec v = rtvec_alloc (n_elts);
3497 unsigned int i;
3499 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3500 for (i = 0; i < n_elts; i++)
3502 rtx x = XVECEXP (trueop1, 0, i);
3504 gcc_assert (CONST_INT_P (x));
3505 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3506 INTVAL (x));
3509 return gen_rtx_CONST_VECTOR (mode, v);
3512 /* Recognize the identity. */
3513 if (GET_MODE (trueop0) == mode)
3515 bool maybe_ident = true;
3516 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3518 rtx j = XVECEXP (trueop1, 0, i);
3519 if (!CONST_INT_P (j) || INTVAL (j) != i)
3521 maybe_ident = false;
3522 break;
3525 if (maybe_ident)
3526 return trueop0;
3529 /* If we build {a,b} then permute it, build the result directly. */
3530 if (XVECLEN (trueop1, 0) == 2
3531 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3532 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3533 && GET_CODE (trueop0) == VEC_CONCAT
3534 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3535 && GET_MODE (XEXP (trueop0, 0)) == mode
3536 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3537 && GET_MODE (XEXP (trueop0, 1)) == mode)
3539 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3540 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3541 rtx subop0, subop1;
3543 gcc_assert (i0 < 4 && i1 < 4);
3544 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3545 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3547 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
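/* E.g. selecting elements {2, 1} from (vec_concat:V4SI (vec_concat:V2SI a b)
   (vec_concat:V2SI c d)) yields (vec_concat:V2SI c b) directly. */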
3550 if (XVECLEN (trueop1, 0) == 2
3551 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3552 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3553 && GET_CODE (trueop0) == VEC_CONCAT
3554 && GET_MODE (trueop0) == mode)
3556 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3557 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3558 rtx subop0, subop1;
3560 gcc_assert (i0 < 2 && i1 < 2);
3561 subop0 = XEXP (trueop0, i0);
3562 subop1 = XEXP (trueop0, i1);
3564 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3567 /* If we select one half of a vec_concat, return that. */
3568 if (GET_CODE (trueop0) == VEC_CONCAT
3569 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3571 rtx subop0 = XEXP (trueop0, 0);
3572 rtx subop1 = XEXP (trueop0, 1);
3573 machine_mode mode0 = GET_MODE (subop0);
3574 machine_mode mode1 = GET_MODE (subop1);
3575 int li = GET_MODE_UNIT_SIZE (mode0);
3576 int l0 = GET_MODE_SIZE (mode0) / li;
3577 int l1 = GET_MODE_SIZE (mode1) / li;
3578 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3579 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3581 bool success = true;
3582 for (int i = 1; i < l0; ++i)
3584 rtx j = XVECEXP (trueop1, 0, i);
3585 if (!CONST_INT_P (j) || INTVAL (j) != i)
3587 success = false;
3588 break;
3591 if (success)
3592 return subop0;
3594 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3596 bool success = true;
3597 for (int i = 1; i < l1; ++i)
3599 rtx j = XVECEXP (trueop1, 0, i);
3600 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3602 success = false;
3603 break;
3606 if (success)
3607 return subop1;
3612 if (XVECLEN (trueop1, 0) == 1
3613 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3614 && GET_CODE (trueop0) == VEC_CONCAT)
3616 rtx vec = trueop0;
3617 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3619 /* Try to find the element in the VEC_CONCAT. */
3620 while (GET_MODE (vec) != mode
3621 && GET_CODE (vec) == VEC_CONCAT)
3623 HOST_WIDE_INT vec_size;
3625 if (CONST_INT_P (XEXP (vec, 0)))
3627 /* vec_concat of two const_ints doesn't make sense with
3628 respect to modes. */
3629 if (CONST_INT_P (XEXP (vec, 1)))
3630 return 0;
3632 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3633 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3635 else
3636 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3638 if (offset < vec_size)
3639 vec = XEXP (vec, 0);
3640 else
3642 offset -= vec_size;
3643 vec = XEXP (vec, 1);
3645 vec = avoid_constant_pool_reference (vec);
3648 if (GET_MODE (vec) == mode)
3649 return vec;
3652 /* If we select elements in a vec_merge that all come from the same
3653 operand, select from that operand directly. */
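	  /* For example,
	       (vec_select:V2SI (vec_merge:V4SI X Y (const_int 5))
				(parallel [0 2]))
	     only reads elements 0 and 2, and bits 0 and 2 of the merge
	     mask are set, so every selected element comes from X and the
	     result is (vec_select:V2SI X (parallel [0 2])).  */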
3654 if (GET_CODE (op0) == VEC_MERGE)
3656 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3657 if (CONST_INT_P (trueop02))
3659 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3660 bool all_operand0 = true;
3661 bool all_operand1 = true;
3662 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3664 rtx j = XVECEXP (trueop1, 0, i);
3665 if (sel & (1 << UINTVAL (j)))
3666 all_operand1 = false;
3667 else
3668 all_operand0 = false;
3670 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3671 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3672 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3673 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3677 /* If we have two nested selects that are inverses of each
3678 other, replace them with the source operand. */
3679 if (GET_CODE (trueop0) == VEC_SELECT
3680 && GET_MODE (XEXP (trueop0, 0)) == mode)
3682 rtx op0_subop1 = XEXP (trueop0, 1);
3683 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3684 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3686 /* Apply the outer ordering vector to the inner one. (The inner
3687 ordering vector is expressly permitted to be of a different
3688 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3689 then the two VEC_SELECTs cancel. */
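	  /* For example, in
	       (vec_select (vec_select X (parallel [1 0])) (parallel [1 0]))
	     applying the outer vector [1 0] to the inner vector [1 0]
	     yields [0 1], so the whole expression is X itself.  */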
3690 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3692 rtx x = XVECEXP (trueop1, 0, i);
3693 if (!CONST_INT_P (x))
3694 return 0;
3695 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3696 if (!CONST_INT_P (y) || i != INTVAL (y))
3697 return 0;
3699 return XEXP (trueop0, 0);
3702 return 0;
3703 case VEC_CONCAT:
3705 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3706 ? GET_MODE (trueop0)
3707 : GET_MODE_INNER (mode));
3708 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3709 ? GET_MODE (trueop1)
3710 : GET_MODE_INNER (mode));
3712 gcc_assert (VECTOR_MODE_P (mode));
3713 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3714 == GET_MODE_SIZE (mode));
3716 if (VECTOR_MODE_P (op0_mode))
3717 gcc_assert (GET_MODE_INNER (mode)
3718 == GET_MODE_INNER (op0_mode));
3719 else
3720 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3722 if (VECTOR_MODE_P (op1_mode))
3723 gcc_assert (GET_MODE_INNER (mode)
3724 == GET_MODE_INNER (op1_mode));
3725 else
3726 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3728 if ((GET_CODE (trueop0) == CONST_VECTOR
3729 || CONST_SCALAR_INT_P (trueop0)
3730 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3731 && (GET_CODE (trueop1) == CONST_VECTOR
3732 || CONST_SCALAR_INT_P (trueop1)
3733 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3735 int elt_size = GET_MODE_UNIT_SIZE (mode);
3736 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3737 rtvec v = rtvec_alloc (n_elts);
3738 unsigned int i;
3739 unsigned in_n_elts = 1;
3741 if (VECTOR_MODE_P (op0_mode))
3742 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3743 for (i = 0; i < n_elts; i++)
3745 if (i < in_n_elts)
3747 if (!VECTOR_MODE_P (op0_mode))
3748 RTVEC_ELT (v, i) = trueop0;
3749 else
3750 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3752 else
3754 if (!VECTOR_MODE_P (op1_mode))
3755 RTVEC_ELT (v, i) = trueop1;
3756 else
3757 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3758 i - in_n_elts);
3762 return gen_rtx_CONST_VECTOR (mode, v);
3765 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3766 Restrict the transformation to avoid generating a VEC_SELECT with a
3767 mode unrelated to its operand. */
3768 if (GET_CODE (trueop0) == VEC_SELECT
3769 && GET_CODE (trueop1) == VEC_SELECT
3770 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3771 && GET_MODE (XEXP (trueop0, 0)) == mode)
3773 rtx par0 = XEXP (trueop0, 1);
3774 rtx par1 = XEXP (trueop1, 1);
3775 int len0 = XVECLEN (par0, 0);
3776 int len1 = XVECLEN (par1, 0);
3777 rtvec vec = rtvec_alloc (len0 + len1);
3778 for (int i = 0; i < len0; i++)
3779 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3780 for (int i = 0; i < len1; i++)
3781 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3782 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3783 gen_rtx_PARALLEL (VOIDmode, vec));
3786 return 0;
3788 default:
3789 gcc_unreachable ();
3792 return 0;
3796 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3797 rtx op0, rtx op1)
3799 unsigned int width = GET_MODE_PRECISION (mode);
3801 if (VECTOR_MODE_P (mode)
3802 && code != VEC_CONCAT
3803 && GET_CODE (op0) == CONST_VECTOR
3804 && GET_CODE (op1) == CONST_VECTOR)
3806 unsigned n_elts = GET_MODE_NUNITS (mode);
3807 machine_mode op0mode = GET_MODE (op0);
3808 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3809 machine_mode op1mode = GET_MODE (op1);
3810 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3811 rtvec v = rtvec_alloc (n_elts);
3812 unsigned int i;
3814 gcc_assert (op0_n_elts == n_elts);
3815 gcc_assert (op1_n_elts == n_elts);
3816 for (i = 0; i < n_elts; i++)
3818 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3819 CONST_VECTOR_ELT (op0, i),
3820 CONST_VECTOR_ELT (op1, i));
3821 if (!x)
3822 return 0;
3823 RTVEC_ELT (v, i) = x;
3826 return gen_rtx_CONST_VECTOR (mode, v);
3829 if (VECTOR_MODE_P (mode)
3830 && code == VEC_CONCAT
3831 && (CONST_SCALAR_INT_P (op0)
3832 || GET_CODE (op0) == CONST_FIXED
3833 || CONST_DOUBLE_AS_FLOAT_P (op0))
3834 && (CONST_SCALAR_INT_P (op1)
3835 || CONST_DOUBLE_AS_FLOAT_P (op1)
3836 || GET_CODE (op1) == CONST_FIXED))
3838 unsigned n_elts = GET_MODE_NUNITS (mode);
3839 rtvec v = rtvec_alloc (n_elts);
3841 gcc_assert (n_elts >= 2);
3842 if (n_elts == 2)
3844 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3845 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3847 RTVEC_ELT (v, 0) = op0;
3848 RTVEC_ELT (v, 1) = op1;
3850 else
3852 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3853 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3854 unsigned i;
3856 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3857 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3858 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3860 for (i = 0; i < op0_n_elts; ++i)
3861 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3862 for (i = 0; i < op1_n_elts; ++i)
3863 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3866 return gen_rtx_CONST_VECTOR (mode, v);
3869 if (SCALAR_FLOAT_MODE_P (mode)
3870 && CONST_DOUBLE_AS_FLOAT_P (op0)
3871 && CONST_DOUBLE_AS_FLOAT_P (op1)
3872 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3874 if (code == AND
3875 || code == IOR
3876 || code == XOR)
3878 long tmp0[4];
3879 long tmp1[4];
3880 REAL_VALUE_TYPE r;
3881 int i;
3883 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3884 GET_MODE (op0));
3885 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3886 GET_MODE (op1));
3887 for (i = 0; i < 4; i++)
3889 switch (code)
3891 case AND:
3892 tmp0[i] &= tmp1[i];
3893 break;
3894 case IOR:
3895 tmp0[i] |= tmp1[i];
3896 break;
3897 case XOR:
3898 tmp0[i] ^= tmp1[i];
3899 break;
3900 default:
3901 gcc_unreachable ();
3904 real_from_target (&r, tmp0, mode);
3905 return const_double_from_real_value (r, mode);
3907 else
3909 REAL_VALUE_TYPE f0, f1, value, result;
3910 const REAL_VALUE_TYPE *opr0, *opr1;
3911 bool inexact;
3913 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3914 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3916 if (HONOR_SNANS (mode)
3917 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3918 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3919 return 0;
3921 real_convert (&f0, mode, opr0);
3922 real_convert (&f1, mode, opr1);
3924 if (code == DIV
3925 && real_equal (&f1, &dconst0)
3926 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3927 return 0;
3929 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3930 && flag_trapping_math
3931 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3933 int s0 = REAL_VALUE_NEGATIVE (f0);
3934 int s1 = REAL_VALUE_NEGATIVE (f1);
3936 switch (code)
3938 case PLUS:
3939 /* Inf + -Inf = NaN plus exception. */
3940 if (s0 != s1)
3941 return 0;
3942 break;
3943 case MINUS:
3944 /* Inf - Inf = NaN plus exception. */
3945 if (s0 == s1)
3946 return 0;
3947 break;
3948 case DIV:
3949 /* Inf / Inf = NaN plus exception. */
3950 return 0;
3951 default:
3952 break;
3956 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3957 && flag_trapping_math
3958 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3959 || (REAL_VALUE_ISINF (f1)
3960 && real_equal (&f0, &dconst0))))
3961 /* Inf * 0 = NaN plus exception. */
3962 return 0;
3964 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3965 &f0, &f1);
3966 real_convert (&result, mode, &value);
3968 /* Don't constant fold this floating point operation if
3969 the result has overflowed and flag_trapping_math is set. */
3971 if (flag_trapping_math
3972 && MODE_HAS_INFINITIES (mode)
3973 && REAL_VALUE_ISINF (result)
3974 && !REAL_VALUE_ISINF (f0)
3975 && !REAL_VALUE_ISINF (f1))
3976 /* Overflow plus exception. */
3977 return 0;
3979 /* Don't constant fold this floating point operation if the
3980 result may depend upon the run-time rounding mode and
3981 flag_rounding_math is set, or if GCC's software emulation
3982 is unable to accurately represent the result. */
3984 if ((flag_rounding_math
3985 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3986 && (inexact || !real_identical (&result, &value)))
3987 return NULL_RTX;
3989 return const_double_from_real_value (result, mode);
3993 /* We can fold some multi-word operations. */
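   /* For example, (ashift:DI (const_int 1) (const_int 40)) folds to a
      constant with the value 1 << 40 via the wide_int routines below,
      while a shift count of 64 or more yields NULL_RTX unless
      SHIFT_COUNT_TRUNCATED reduces the count modulo the precision.  */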
3994 if ((GET_MODE_CLASS (mode) == MODE_INT
3995 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3996 && CONST_SCALAR_INT_P (op0)
3997 && CONST_SCALAR_INT_P (op1))
3999 wide_int result;
4000 bool overflow;
4001 rtx_mode_t pop0 = std::make_pair (op0, mode);
4002 rtx_mode_t pop1 = std::make_pair (op1, mode);
4004 #if TARGET_SUPPORTS_WIDE_INT == 0
4005 /* This assert keeps the simplification from producing a result
4006 that cannot be represented in a CONST_DOUBLE, but a lot of
4007 upstream callers expect that this function never fails to
4008 simplify something, so if you added this check to the test
4009 above, the code would die later anyway. If this assert
4010 fires, you just need to make the port support wide int. */
4011 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4012 #endif
4013 switch (code)
4015 case MINUS:
4016 result = wi::sub (pop0, pop1);
4017 break;
4019 case PLUS:
4020 result = wi::add (pop0, pop1);
4021 break;
4023 case MULT:
4024 result = wi::mul (pop0, pop1);
4025 break;
4027 case DIV:
4028 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4029 if (overflow)
4030 return NULL_RTX;
4031 break;
4033 case MOD:
4034 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4035 if (overflow)
4036 return NULL_RTX;
4037 break;
4039 case UDIV:
4040 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4041 if (overflow)
4042 return NULL_RTX;
4043 break;
4045 case UMOD:
4046 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4047 if (overflow)
4048 return NULL_RTX;
4049 break;
4051 case AND:
4052 result = wi::bit_and (pop0, pop1);
4053 break;
4055 case IOR:
4056 result = wi::bit_or (pop0, pop1);
4057 break;
4059 case XOR:
4060 result = wi::bit_xor (pop0, pop1);
4061 break;
4063 case SMIN:
4064 result = wi::smin (pop0, pop1);
4065 break;
4067 case SMAX:
4068 result = wi::smax (pop0, pop1);
4069 break;
4071 case UMIN:
4072 result = wi::umin (pop0, pop1);
4073 break;
4075 case UMAX:
4076 result = wi::umax (pop0, pop1);
4077 break;
4079 case LSHIFTRT:
4080 case ASHIFTRT:
4081 case ASHIFT:
4083 wide_int wop1 = pop1;
4084 if (SHIFT_COUNT_TRUNCATED)
4085 wop1 = wi::umod_trunc (wop1, width);
4086 else if (wi::geu_p (wop1, width))
4087 return NULL_RTX;
4089 switch (code)
4091 case LSHIFTRT:
4092 result = wi::lrshift (pop0, wop1);
4093 break;
4095 case ASHIFTRT:
4096 result = wi::arshift (pop0, wop1);
4097 break;
4099 case ASHIFT:
4100 result = wi::lshift (pop0, wop1);
4101 break;
4103 default:
4104 gcc_unreachable ();
4106 break;
4108 case ROTATE:
4109 case ROTATERT:
4111 if (wi::neg_p (pop1))
4112 return NULL_RTX;
4114 switch (code)
4116 case ROTATE:
4117 result = wi::lrotate (pop0, pop1);
4118 break;
4120 case ROTATERT:
4121 result = wi::rrotate (pop0, pop1);
4122 break;
4124 default:
4125 gcc_unreachable ();
4127 break;
4129 default:
4130 return NULL_RTX;
4132 return immed_wide_int_const (result, mode);
4135 return NULL_RTX;
4140 /* Return a positive integer if X should sort after Y. The value
4141 returned is 1 if and only if X and Y are both regs. */
4143 static int
4144 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4146 int result;
4148 result = (commutative_operand_precedence (y)
4149 - commutative_operand_precedence (x));
4150 if (result)
4151 return result + result;
4153 /* Group together equal REGs to do more simplification. */
4154 if (REG_P (x) && REG_P (y))
4155 return REGNO (x) > REGNO (y);
4157 return 0;
4160 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4161 operands may be another PLUS or MINUS.
4163 Rather than testing for specific cases, we do this by a brute-force method
4164 and do all possible simplifications until no more changes occur. Then
4165 we rebuild the operation.
4167 May return NULL_RTX when no changes were made. */
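/* For example, for an integer mode, (plus (minus A B) (plus B C)) is first
   flattened into the signed terms {+A, -B, +B, +C}; the matching +B/-B pair
   then cancels in the pairwise simplification loop and the result is
   rebuilt as (plus A C).  */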
4169 static rtx
4170 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4171 rtx op1)
4173 struct simplify_plus_minus_op_data
4175 rtx op;
4176 short neg;
4177 } ops[16];
4178 rtx result, tem;
4179 int n_ops = 2;
4180 int changed, n_constants, canonicalized = 0;
4181 int i, j;
4183 memset (ops, 0, sizeof ops);
4185 /* Set up the two operands and then expand them until nothing has been
4186 changed. If we run out of room in our array, give up; this should
4187 almost never happen. */
4189 ops[0].op = op0;
4190 ops[0].neg = 0;
4191 ops[1].op = op1;
4192 ops[1].neg = (code == MINUS);
4196 changed = 0;
4197 n_constants = 0;
4199 for (i = 0; i < n_ops; i++)
4201 rtx this_op = ops[i].op;
4202 int this_neg = ops[i].neg;
4203 enum rtx_code this_code = GET_CODE (this_op);
4205 switch (this_code)
4207 case PLUS:
4208 case MINUS:
4209 if (n_ops == ARRAY_SIZE (ops))
4210 return NULL_RTX;
4212 ops[n_ops].op = XEXP (this_op, 1);
4213 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4214 n_ops++;
4216 ops[i].op = XEXP (this_op, 0);
4217 changed = 1;
4218 /* If this operand was negated then we will potentially
4219 canonicalize the expression. Similarly if we don't
4220 place the operands adjacent we're re-ordering the
4221 expression and thus might be performing a
4222 canonicalization. Ignore register re-ordering.
4223 ??? It might be better to shuffle the ops array here,
4224 but then (plus (plus A B) (plus C D)) wouldn't
4225 be seen as non-canonical. */
4226 if (this_neg
4227 || (i != n_ops - 2
4228 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4229 canonicalized = 1;
4230 break;
4232 case NEG:
4233 ops[i].op = XEXP (this_op, 0);
4234 ops[i].neg = ! this_neg;
4235 changed = 1;
4236 canonicalized = 1;
4237 break;
4239 case CONST:
4240 if (n_ops != ARRAY_SIZE (ops)
4241 && GET_CODE (XEXP (this_op, 0)) == PLUS
4242 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4243 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4245 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4246 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4247 ops[n_ops].neg = this_neg;
4248 n_ops++;
4249 changed = 1;
4250 canonicalized = 1;
4252 break;
4254 case NOT:
4255 /* ~a -> (-a - 1) */
4256 if (n_ops != ARRAY_SIZE (ops))
4258 ops[n_ops].op = CONSTM1_RTX (mode);
4259 ops[n_ops++].neg = this_neg;
4260 ops[i].op = XEXP (this_op, 0);
4261 ops[i].neg = !this_neg;
4262 changed = 1;
4263 canonicalized = 1;
4265 break;
4267 case CONST_INT:
4268 n_constants++;
4269 if (this_neg)
4271 ops[i].op = neg_const_int (mode, this_op);
4272 ops[i].neg = 0;
4273 changed = 1;
4274 canonicalized = 1;
4276 break;
4278 default:
4279 break;
4283 while (changed);
4285 if (n_constants > 1)
4286 canonicalized = 1;
4288 gcc_assert (n_ops >= 2);
4290 /* If we only have two operands, we can avoid the loops. */
4291 if (n_ops == 2)
4293 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4294 rtx lhs, rhs;
4296 /* Get the two operands. Be careful with the order, especially for
4297 the cases where code == MINUS. */
4298 if (ops[0].neg && ops[1].neg)
4300 lhs = gen_rtx_NEG (mode, ops[0].op);
4301 rhs = ops[1].op;
4303 else if (ops[0].neg)
4305 lhs = ops[1].op;
4306 rhs = ops[0].op;
4308 else
4310 lhs = ops[0].op;
4311 rhs = ops[1].op;
4314 return simplify_const_binary_operation (code, mode, lhs, rhs);
4317 /* Now simplify each pair of operands until nothing changes. */
4318 while (1)
4320 /* Insertion sort is good enough for a small array. */
4321 for (i = 1; i < n_ops; i++)
4323 struct simplify_plus_minus_op_data save;
4324 int cmp;
4326 j = i - 1;
4327 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4328 if (cmp <= 0)
4329 continue;
4330 /* Just swapping registers doesn't count as canonicalization. */
4331 if (cmp != 1)
4332 canonicalized = 1;
4334 save = ops[i];
4336 ops[j + 1] = ops[j];
4337 while (j--
4338 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4339 ops[j + 1] = save;
4342 changed = 0;
4343 for (i = n_ops - 1; i > 0; i--)
4344 for (j = i - 1; j >= 0; j--)
4346 rtx lhs = ops[j].op, rhs = ops[i].op;
4347 int lneg = ops[j].neg, rneg = ops[i].neg;
4349 if (lhs != 0 && rhs != 0)
4351 enum rtx_code ncode = PLUS;
4353 if (lneg != rneg)
4355 ncode = MINUS;
4356 if (lneg)
4357 std::swap (lhs, rhs);
4359 else if (swap_commutative_operands_p (lhs, rhs))
4360 std::swap (lhs, rhs);
4362 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4363 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4365 rtx tem_lhs, tem_rhs;
4367 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4368 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4369 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4370 tem_rhs);
4372 if (tem && !CONSTANT_P (tem))
4373 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4375 else
4376 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4378 if (tem)
4380 /* Reject "simplifications" that just wrap the two
4381 arguments in a CONST. Failure to do so can result
4382 in infinite recursion with simplify_binary_operation
4383 when it calls us to simplify CONST operations.
4384 Also, if we find such a simplification, don't try
4385 any more combinations with this rhs: We must have
4386 something like symbol+offset, i.e. one of the
4387 trivial CONST expressions we handle later. */
4388 if (GET_CODE (tem) == CONST
4389 && GET_CODE (XEXP (tem, 0)) == ncode
4390 && XEXP (XEXP (tem, 0), 0) == lhs
4391 && XEXP (XEXP (tem, 0), 1) == rhs)
4392 break;
4393 lneg &= rneg;
4394 if (GET_CODE (tem) == NEG)
4395 tem = XEXP (tem, 0), lneg = !lneg;
4396 if (CONST_INT_P (tem) && lneg)
4397 tem = neg_const_int (mode, tem), lneg = 0;
4399 ops[i].op = tem;
4400 ops[i].neg = lneg;
4401 ops[j].op = NULL_RTX;
4402 changed = 1;
4403 canonicalized = 1;
4408 if (!changed)
4409 break;
4411 /* Pack all the operands to the lower-numbered entries. */
4412 for (i = 0, j = 0; j < n_ops; j++)
4413 if (ops[j].op)
4415 ops[i] = ops[j];
4416 i++;
4418 n_ops = i;
4421 /* If nothing changed, fail. */
4422 if (!canonicalized)
4423 return NULL_RTX;
4425 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4426 if (n_ops == 2
4427 && CONST_INT_P (ops[1].op)
4428 && CONSTANT_P (ops[0].op)
4429 && ops[0].neg)
4430 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4432 /* We suppressed creation of trivial CONST expressions in the
4433 combination loop to avoid recursion. Create one manually now.
4434 The combination loop should have ensured that there is exactly
4435 one CONST_INT, and the sort will have ensured that it is last
4436 in the array and that any other constant will be next-to-last. */
4438 if (n_ops > 1
4439 && CONST_INT_P (ops[n_ops - 1].op)
4440 && CONSTANT_P (ops[n_ops - 2].op))
4442 rtx value = ops[n_ops - 1].op;
4443 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4444 value = neg_const_int (mode, value);
4445 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4446 INTVAL (value));
4447 n_ops--;
4450 /* Put a non-negated operand first, if possible. */
4452 for (i = 0; i < n_ops && ops[i].neg; i++)
4453 continue;
4454 if (i == n_ops)
4455 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4456 else if (i != 0)
4458 tem = ops[0].op;
4459 ops[0] = ops[i];
4460 ops[i].op = tem;
4461 ops[i].neg = 1;
4464 /* Now make the result by performing the requested operations. */
4465 result = ops[0].op;
4466 for (i = 1; i < n_ops; i++)
4467 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4468 mode, result, ops[i].op);
4470 return result;
4473 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4474 static bool
4475 plus_minus_operand_p (const_rtx x)
4477 return GET_CODE (x) == PLUS
4478 || GET_CODE (x) == MINUS
4479 || (GET_CODE (x) == CONST
4480 && GET_CODE (XEXP (x, 0)) == PLUS
4481 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4482 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4485 /* Like simplify_binary_operation except used for relational operators.
4486 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4487 not both be VOIDmode.
4489 CMP_MODE specifies the mode in which the comparison is done, so it is
4490 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4491 the operands or, if both are VOIDmode, the operands are compared in
4492 "infinite precision". */
4494 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4495 machine_mode cmp_mode, rtx op0, rtx op1)
4497 rtx tem, trueop0, trueop1;
4499 if (cmp_mode == VOIDmode)
4500 cmp_mode = GET_MODE (op0);
4501 if (cmp_mode == VOIDmode)
4502 cmp_mode = GET_MODE (op1);
4504 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4505 if (tem)
4507 if (SCALAR_FLOAT_MODE_P (mode))
4509 if (tem == const0_rtx)
4510 return CONST0_RTX (mode);
4511 #ifdef FLOAT_STORE_FLAG_VALUE
4513 REAL_VALUE_TYPE val;
4514 val = FLOAT_STORE_FLAG_VALUE (mode);
4515 return const_double_from_real_value (val, mode);
4517 #else
4518 return NULL_RTX;
4519 #endif
4521 if (VECTOR_MODE_P (mode))
4523 if (tem == const0_rtx)
4524 return CONST0_RTX (mode);
4525 #ifdef VECTOR_STORE_FLAG_VALUE
4527 int i, units;
4528 rtvec v;
4530 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4531 if (val == NULL_RTX)
4532 return NULL_RTX;
4533 if (val == const1_rtx)
4534 return CONST1_RTX (mode);
4536 units = GET_MODE_NUNITS (mode);
4537 v = rtvec_alloc (units);
4538 for (i = 0; i < units; i++)
4539 RTVEC_ELT (v, i) = val;
4540 return gen_rtx_raw_CONST_VECTOR (mode, v);
4542 #else
4543 return NULL_RTX;
4544 #endif
4547 return tem;
4550 /* For the following tests, ensure const0_rtx is op1. */
4551 if (swap_commutative_operands_p (op0, op1)
4552 || (op0 == const0_rtx && op1 != const0_rtx))
4553 std::swap (op0, op1), code = swap_condition (code);
4555 /* If op0 is a compare, extract the comparison arguments from it. */
4556 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4557 return simplify_gen_relational (code, mode, VOIDmode,
4558 XEXP (op0, 0), XEXP (op0, 1));
4560 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4561 || CC0_P (op0))
4562 return NULL_RTX;
4564 trueop0 = avoid_constant_pool_reference (op0);
4565 trueop1 = avoid_constant_pool_reference (op1);
4566 return simplify_relational_operation_1 (code, mode, cmp_mode,
4567 trueop0, trueop1);
4570 /* This part of simplify_relational_operation is only used when CMP_MODE
4571 is not in class MODE_CC (i.e. it is a real comparison).
4573 MODE is the mode of the result, while CMP_MODE specifies the mode in
4574 which the comparison is done, so it is the mode of the operands. */
4576 static rtx
4577 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4578 machine_mode cmp_mode, rtx op0, rtx op1)
4580 enum rtx_code op0code = GET_CODE (op0);
4582 if (op1 == const0_rtx && COMPARISON_P (op0))
4584 /* If op0 is a comparison, extract the comparison arguments
4585 from it. */
4586 if (code == NE)
4588 if (GET_MODE (op0) == mode)
4589 return simplify_rtx (op0);
4590 else
4591 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4592 XEXP (op0, 0), XEXP (op0, 1));
4594 else if (code == EQ)
4596 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4597 if (new_code != UNKNOWN)
4598 return simplify_gen_relational (new_code, mode, VOIDmode,
4599 XEXP (op0, 0), XEXP (op0, 1));
4603 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4604 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
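  /* For example, (ltu (plus:SI a (const_int 7)) (const_int 7)) tests
     whether the addition wrapped around, which is equivalent to
     (geu a (const_int -7)).  */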
4605 if ((code == LTU || code == GEU)
4606 && GET_CODE (op0) == PLUS
4607 && CONST_INT_P (XEXP (op0, 1))
4608 && (rtx_equal_p (op1, XEXP (op0, 0))
4609 || rtx_equal_p (op1, XEXP (op0, 1)))
4610 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4611 && XEXP (op0, 1) != const0_rtx)
4613 rtx new_cmp
4614 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4615 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4616 cmp_mode, XEXP (op0, 0), new_cmp);
4619 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4620 if ((code == LTU || code == GEU)
4621 && GET_CODE (op0) == PLUS
4622 && rtx_equal_p (op1, XEXP (op0, 1))
4623 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4624 && !rtx_equal_p (op1, XEXP (op0, 0)))
4625 return simplify_gen_relational (code, mode, cmp_mode, op0,
4626 copy_rtx (XEXP (op0, 0)));
4628 if (op1 == const0_rtx)
4630 /* Canonicalize (GTU x 0) as (NE x 0). */
4631 if (code == GTU)
4632 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4633 /* Canonicalize (LEU x 0) as (EQ x 0). */
4634 if (code == LEU)
4635 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4637 else if (op1 == const1_rtx)
4639 switch (code)
4641 case GE:
4642 /* Canonicalize (GE x 1) as (GT x 0). */
4643 return simplify_gen_relational (GT, mode, cmp_mode,
4644 op0, const0_rtx);
4645 case GEU:
4646 /* Canonicalize (GEU x 1) as (NE x 0). */
4647 return simplify_gen_relational (NE, mode, cmp_mode,
4648 op0, const0_rtx);
4649 case LT:
4650 /* Canonicalize (LT x 1) as (LE x 0). */
4651 return simplify_gen_relational (LE, mode, cmp_mode,
4652 op0, const0_rtx);
4653 case LTU:
4654 /* Canonicalize (LTU x 1) as (EQ x 0). */
4655 return simplify_gen_relational (EQ, mode, cmp_mode,
4656 op0, const0_rtx);
4657 default:
4658 break;
4661 else if (op1 == constm1_rtx)
4663 /* Canonicalize (LE x -1) as (LT x 0). */
4664 if (code == LE)
4665 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4666 /* Canonicalize (GT x -1) as (GE x 0). */
4667 if (code == GT)
4668 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4671 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4672 if ((code == EQ || code == NE)
4673 && (op0code == PLUS || op0code == MINUS)
4674 && CONSTANT_P (op1)
4675 && CONSTANT_P (XEXP (op0, 1))
4676 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4678 rtx x = XEXP (op0, 0);
4679 rtx c = XEXP (op0, 1);
4680 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4681 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4683 /* Detect an infinite recursion, where this simplification would
4684 oscillate between:
4685 A + B == C <---> C - B == A,
4686 where A, B, and C are all non-simplifiable constant expressions,
4687 usually SYMBOL_REFs. */
4688 if (GET_CODE (tem) == invcode
4689 && CONSTANT_P (x)
4690 && rtx_equal_p (c, XEXP (tem, 1)))
4691 return NULL_RTX;
4693 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4696 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4697 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4698 if (code == NE
4699 && op1 == const0_rtx
4700 && GET_MODE_CLASS (mode) == MODE_INT
4701 && cmp_mode != VOIDmode
4702 /* ??? Work-around BImode bugs in the ia64 backend. */
4703 && mode != BImode
4704 && cmp_mode != BImode
4705 && nonzero_bits (op0, cmp_mode) == 1
4706 && STORE_FLAG_VALUE == 1)
4707 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4708 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4709 : lowpart_subreg (mode, op0, cmp_mode);
4711 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4712 if ((code == EQ || code == NE)
4713 && op1 == const0_rtx
4714 && op0code == XOR)
4715 return simplify_gen_relational (code, mode, cmp_mode,
4716 XEXP (op0, 0), XEXP (op0, 1));
4718 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4719 if ((code == EQ || code == NE)
4720 && op0code == XOR
4721 && rtx_equal_p (XEXP (op0, 0), op1)
4722 && !side_effects_p (XEXP (op0, 0)))
4723 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4724 CONST0_RTX (mode));
4726 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4727 if ((code == EQ || code == NE)
4728 && op0code == XOR
4729 && rtx_equal_p (XEXP (op0, 1), op1)
4730 && !side_effects_p (XEXP (op0, 1)))
4731 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4732 CONST0_RTX (mode));
4734 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4735 if ((code == EQ || code == NE)
4736 && op0code == XOR
4737 && CONST_SCALAR_INT_P (op1)
4738 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4739 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4740 simplify_gen_binary (XOR, cmp_mode,
4741 XEXP (op0, 1), op1));
4743 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4744 can be implemented with a BICS instruction on some targets, or
4745 constant-folded if y is a constant. */
4746 if ((code == EQ || code == NE)
4747 && op0code == AND
4748 && rtx_equal_p (XEXP (op0, 0), op1)
4749 && !side_effects_p (op1)
4750 && op1 != CONST0_RTX (cmp_mode))
4752 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4753 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4755 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4756 CONST0_RTX (cmp_mode));
4759 /* Likewise for (eq/ne (and x y) y). */
4760 if ((code == EQ || code == NE)
4761 && op0code == AND
4762 && rtx_equal_p (XEXP (op0, 1), op1)
4763 && !side_effects_p (op1)
4764 && op1 != CONST0_RTX (cmp_mode))
4766 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4767 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4769 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4770 CONST0_RTX (cmp_mode));
4773 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2), where C2 is C1 with its bytes swapped. */
4774 if ((code == EQ || code == NE)
4775 && GET_CODE (op0) == BSWAP
4776 && CONST_SCALAR_INT_P (op1))
4777 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4778 simplify_gen_unary (BSWAP, cmp_mode,
4779 op1, cmp_mode));
4781 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4782 if ((code == EQ || code == NE)
4783 && GET_CODE (op0) == BSWAP
4784 && GET_CODE (op1) == BSWAP)
4785 return simplify_gen_relational (code, mode, cmp_mode,
4786 XEXP (op0, 0), XEXP (op1, 0));
4788 if (op0code == POPCOUNT && op1 == const0_rtx)
4789 switch (code)
4791 case EQ:
4792 case LE:
4793 case LEU:
4794 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4795 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4796 XEXP (op0, 0), const0_rtx);
4798 case NE:
4799 case GT:
4800 case GTU:
4801 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4802 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4803 XEXP (op0, 0), const0_rtx);
4805 default:
4806 break;
4809 return NULL_RTX;
4812 enum
4814 CMP_EQ = 1,
4815 CMP_LT = 2,
4816 CMP_GT = 4,
4817 CMP_LTU = 8,
4818 CMP_GTU = 16
4822 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4823 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4824 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4825 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4826 For floating-point comparisons, assume that the operands were ordered. */
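/* For example, with KNOWN_RESULTS == (CMP_LT | CMP_LTU), the codes LT, LE,
   LTU and LEU yield const_true_rtx, while EQ, GT, GE, GTU and GEU yield
   const0_rtx.  */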
4828 static rtx
4829 comparison_result (enum rtx_code code, int known_results)
4831 switch (code)
4833 case EQ:
4834 case UNEQ:
4835 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4836 case NE:
4837 case LTGT:
4838 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4840 case LT:
4841 case UNLT:
4842 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4843 case GE:
4844 case UNGE:
4845 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4847 case GT:
4848 case UNGT:
4849 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4850 case LE:
4851 case UNLE:
4852 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4854 case LTU:
4855 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4856 case GEU:
4857 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4859 case GTU:
4860 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4861 case LEU:
4862 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4864 case ORDERED:
4865 return const_true_rtx;
4866 case UNORDERED:
4867 return const0_rtx;
4868 default:
4869 gcc_unreachable ();
4873 /* Check if the given comparison (done in the given MODE) is actually
4874 a tautology or a contradiction. If the mode is VOIDmode, the
4875 comparison is done in "infinite precision". If no simplification
4876 is possible, this function returns zero. Otherwise, it returns
4877 either const_true_rtx or const0_rtx. */
4880 simplify_const_relational_operation (enum rtx_code code,
4881 machine_mode mode,
4882 rtx op0, rtx op1)
4884 rtx tem;
4885 rtx trueop0;
4886 rtx trueop1;
4888 gcc_assert (mode != VOIDmode
4889 || (GET_MODE (op0) == VOIDmode
4890 && GET_MODE (op1) == VOIDmode));
4892 /* If op0 is a compare, extract the comparison arguments from it. */
4893 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4895 op1 = XEXP (op0, 1);
4896 op0 = XEXP (op0, 0);
4898 if (GET_MODE (op0) != VOIDmode)
4899 mode = GET_MODE (op0);
4900 else if (GET_MODE (op1) != VOIDmode)
4901 mode = GET_MODE (op1);
4902 else
4903 return 0;
4906 /* We can't simplify MODE_CC values since we don't know what the
4907 actual comparison is. */
4908 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4909 return 0;
4911 /* Make sure the constant is second. */
4912 if (swap_commutative_operands_p (op0, op1))
4914 std::swap (op0, op1);
4915 code = swap_condition (code);
4918 trueop0 = avoid_constant_pool_reference (op0);
4919 trueop1 = avoid_constant_pool_reference (op1);
4921 /* For integer comparisons of A and B maybe we can simplify A - B and can
4922 then simplify a comparison of that with zero. If A and B are both either
4923 a register or a CONST_INT, this can't help; testing for these cases will
4924 prevent infinite recursion here and speed things up.
4926 We can only do this for EQ and NE comparisons, since otherwise we
4927 might lose or introduce an overflow that we cannot treat as undefined,
4928 because we do not know the signedness of the operation on either the
4929 left or the right hand side of the comparison. */
4931 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4932 && (code == EQ || code == NE)
4933 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4934 && (REG_P (op1) || CONST_INT_P (trueop1)))
4935 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4936 /* We cannot do this if tem is a nonzero address. */
4937 && ! nonzero_address_p (tem))
4938 return simplify_const_relational_operation (signed_condition (code),
4939 mode, tem, const0_rtx);
4941 if (! HONOR_NANS (mode) && code == ORDERED)
4942 return const_true_rtx;
4944 if (! HONOR_NANS (mode) && code == UNORDERED)
4945 return const0_rtx;
4947 /* For modes without NaNs, if the two operands are equal, we know the
4948 result except if they have side-effects. Even with NaNs we know
4949 the result of unordered comparisons and, if signaling NaNs are
4950 irrelevant, also the result of LT/GT/LTGT. */
4951 if ((! HONOR_NANS (trueop0)
4952 || code == UNEQ || code == UNLE || code == UNGE
4953 || ((code == LT || code == GT || code == LTGT)
4954 && ! HONOR_SNANS (trueop0)))
4955 && rtx_equal_p (trueop0, trueop1)
4956 && ! side_effects_p (trueop0))
4957 return comparison_result (code, CMP_EQ);
4959 /* If the operands are floating-point constants, see if we can fold
4960 the result. */
4961 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4962 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4963 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4965 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4966 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4968 /* Comparisons are unordered iff at least one of the values is NaN. */
4969 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4970 switch (code)
4972 case UNEQ:
4973 case UNLT:
4974 case UNGT:
4975 case UNLE:
4976 case UNGE:
4977 case NE:
4978 case UNORDERED:
4979 return const_true_rtx;
4980 case EQ:
4981 case LT:
4982 case GT:
4983 case LE:
4984 case GE:
4985 case LTGT:
4986 case ORDERED:
4987 return const0_rtx;
4988 default:
4989 return 0;
4992 return comparison_result (code,
4993 (real_equal (d0, d1) ? CMP_EQ :
4994 real_less (d0, d1) ? CMP_LT : CMP_GT));
4997 /* Otherwise, see if the operands are both integers. */
4998 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4999 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5001 /* It would be nice if we really had a mode here. However, the
5002 largest int representable on the target is as good as
5003 infinite. */
5004 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5005 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
5006 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
5008 if (wi::eq_p (ptrueop0, ptrueop1))
5009 return comparison_result (code, CMP_EQ);
5010 else
5012 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5013 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5014 return comparison_result (code, cr);
5018 /* Optimize comparisons with upper and lower bounds. */
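  /* For example, if nonzero_bits limits TRUEOP0 to the range [0, 15],
     then (gtu x (const_int 20)) folds to const0_rtx and
     (leu x (const_int 15)) folds to const_true_rtx.  */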
5019 if (HWI_COMPUTABLE_MODE_P (mode)
5020 && CONST_INT_P (trueop1)
5021 && !side_effects_p (trueop0))
5023 int sign;
5024 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5025 HOST_WIDE_INT val = INTVAL (trueop1);
5026 HOST_WIDE_INT mmin, mmax;
5028 if (code == GEU
5029 || code == LEU
5030 || code == GTU
5031 || code == LTU)
5032 sign = 0;
5033 else
5034 sign = 1;
5036 /* Get a reduced range if the sign bit is zero. */
5037 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5039 mmin = 0;
5040 mmax = nonzero;
5042 else
5044 rtx mmin_rtx, mmax_rtx;
5045 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5047 mmin = INTVAL (mmin_rtx);
5048 mmax = INTVAL (mmax_rtx);
5049 if (sign)
5051 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5053 mmin >>= (sign_copies - 1);
5054 mmax >>= (sign_copies - 1);
5058 switch (code)
5060 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5061 case GEU:
5062 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5063 return const_true_rtx;
5064 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5065 return const0_rtx;
5066 break;
5067 case GE:
5068 if (val <= mmin)
5069 return const_true_rtx;
5070 if (val > mmax)
5071 return const0_rtx;
5072 break;
5074 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5075 case LEU:
5076 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5077 return const_true_rtx;
5078 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5079 return const0_rtx;
5080 break;
5081 case LE:
5082 if (val >= mmax)
5083 return const_true_rtx;
5084 if (val < mmin)
5085 return const0_rtx;
5086 break;
5088 case EQ:
5089 /* x == y is always false for y out of range. */
5090 if (val < mmin || val > mmax)
5091 return const0_rtx;
5092 break;
5094 /* x > y is always false for y >= mmax, always true for y < mmin. */
5095 case GTU:
5096 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5097 return const0_rtx;
5098 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5099 return const_true_rtx;
5100 break;
5101 case GT:
5102 if (val >= mmax)
5103 return const0_rtx;
5104 if (val < mmin)
5105 return const_true_rtx;
5106 break;
5108 /* x < y is always false for y <= mmin, always true for y > mmax. */
5109 case LTU:
5110 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5111 return const0_rtx;
5112 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5113 return const_true_rtx;
5114 break;
5115 case LT:
5116 if (val <= mmin)
5117 return const0_rtx;
5118 if (val > mmax)
5119 return const_true_rtx;
5120 break;
5122 case NE:
5123 /* x != y is always true for y out of range. */
5124 if (val < mmin || val > mmax)
5125 return const_true_rtx;
5126 break;
5128 default:
5129 break;
5133 /* Optimize integer comparisons with zero. */
5134 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5136 /* Some addresses are known to be nonzero. We don't know
5137 their sign, but equality comparisons are known. */
5138 if (nonzero_address_p (trueop0))
5140 if (code == EQ || code == LEU)
5141 return const0_rtx;
5142 if (code == NE || code == GTU)
5143 return const_true_rtx;
5146 /* See if the first operand is an IOR with a constant. If so, we
5147 may be able to determine the result of this comparison. */
5148 if (GET_CODE (op0) == IOR)
5150 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5151 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5153 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5154 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5155 && (UINTVAL (inner_const)
5156 & ((unsigned HOST_WIDE_INT) 1
5157 << sign_bitnum)));
5159 switch (code)
5161 case EQ:
5162 case LEU:
5163 return const0_rtx;
5164 case NE:
5165 case GTU:
5166 return const_true_rtx;
5167 case LT:
5168 case LE:
5169 if (has_sign)
5170 return const_true_rtx;
5171 break;
5172 case GT:
5173 case GE:
5174 if (has_sign)
5175 return const0_rtx;
5176 break;
5177 default:
5178 break;
5184 /* Optimize comparison of ABS with zero. */
5185 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5186 && (GET_CODE (trueop0) == ABS
5187 || (GET_CODE (trueop0) == FLOAT_EXTEND
5188 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5190 switch (code)
5192 case LT:
5193 /* Optimize abs(x) < 0.0. */
5194 if (!HONOR_SNANS (mode)
5195 && (!INTEGRAL_MODE_P (mode)
5196 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5198 if (INTEGRAL_MODE_P (mode)
5199 && (issue_strict_overflow_warning
5200 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5201 warning (OPT_Wstrict_overflow,
5202 ("assuming signed overflow does not occur when "
5203 "assuming abs (x) < 0 is false"));
5204 return const0_rtx;
5206 break;
5208 case GE:
5209 /* Optimize abs(x) >= 0.0. */
5210 if (!HONOR_NANS (mode)
5211 && (!INTEGRAL_MODE_P (mode)
5212 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5214 if (INTEGRAL_MODE_P (mode)
5215 && (issue_strict_overflow_warning
5216 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5217 warning (OPT_Wstrict_overflow,
5218 ("assuming signed overflow does not occur when "
5219 "assuming abs (x) >= 0 is true"));
5220 return const_true_rtx;
5222 break;
5224 case UNGE:
5225 /* Optimize ! (abs(x) < 0.0). */
5226 return const_true_rtx;
5228 default:
5229 break;
5233 return 0;
5236 /* Simplify CODE, an operation with result mode MODE and three operands,
5237 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5238 a constant. Return 0 if no simplification is possible. */
5241 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5242 machine_mode op0_mode, rtx op0, rtx op1,
5243 rtx op2)
5245 unsigned int width = GET_MODE_PRECISION (mode);
5246 bool any_change = false;
5247 rtx tem, trueop2;
5249 /* VOIDmode means "infinite" precision. */
5250 if (width == 0)
5251 width = HOST_BITS_PER_WIDE_INT;
5253 switch (code)
5255 case FMA:
5256 /* Simplify negations around the multiplication. */
5257 /* -a * -b + c => a * b + c. */
5258 if (GET_CODE (op0) == NEG)
5260 tem = simplify_unary_operation (NEG, mode, op1, mode);
5261 if (tem)
5262 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5264 else if (GET_CODE (op1) == NEG)
5266 tem = simplify_unary_operation (NEG, mode, op0, mode);
5267 if (tem)
5268 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5271 /* Canonicalize the two multiplication operands. */
5272 /* a * -b + c => -b * a + c. */
5273 if (swap_commutative_operands_p (op0, op1))
5274 std::swap (op0, op1), any_change = true;
5276 if (any_change)
5277 return gen_rtx_FMA (mode, op0, op1, op2);
5278 return NULL_RTX;
5280 case SIGN_EXTRACT:
5281 case ZERO_EXTRACT:
5282 if (CONST_INT_P (op0)
5283 && CONST_INT_P (op1)
5284 && CONST_INT_P (op2)
5285 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5286 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5288 /* Extracting a bit-field from a constant */
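	  /* For example, with !BITS_BIG_ENDIAN,
	     (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     extracts bits 4..7 and yields (const_int 10); the SIGN_EXTRACT
	     form propagates the field's sign bit and yields (const_int -6).  */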
5289 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5290 HOST_WIDE_INT op1val = INTVAL (op1);
5291 HOST_WIDE_INT op2val = INTVAL (op2);
5292 if (BITS_BIG_ENDIAN)
5293 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5294 else
5295 val >>= op2val;
5297 if (HOST_BITS_PER_WIDE_INT != op1val)
5299 /* First zero-extend. */
5300 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5301 /* If desired, propagate sign bit. */
5302 if (code == SIGN_EXTRACT
5303 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5304 != 0)
5305 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5308 return gen_int_mode (val, mode);
5310 break;
5312 case IF_THEN_ELSE:
5313 if (CONST_INT_P (op0))
5314 return op0 != const0_rtx ? op1 : op2;
5316 /* Convert c ? a : a into "a". */
5317 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5318 return op1;
5320 /* Convert a != b ? a : b into "a". */
5321 if (GET_CODE (op0) == NE
5322 && ! side_effects_p (op0)
5323 && ! HONOR_NANS (mode)
5324 && ! HONOR_SIGNED_ZEROS (mode)
5325 && ((rtx_equal_p (XEXP (op0, 0), op1)
5326 && rtx_equal_p (XEXP (op0, 1), op2))
5327 || (rtx_equal_p (XEXP (op0, 0), op2)
5328 && rtx_equal_p (XEXP (op0, 1), op1))))
5329 return op1;
5331 /* Convert a == b ? a : b into "b". */
5332 if (GET_CODE (op0) == EQ
5333 && ! side_effects_p (op0)
5334 && ! HONOR_NANS (mode)
5335 && ! HONOR_SIGNED_ZEROS (mode)
5336 && ((rtx_equal_p (XEXP (op0, 0), op1)
5337 && rtx_equal_p (XEXP (op0, 1), op2))
5338 || (rtx_equal_p (XEXP (op0, 0), op2)
5339 && rtx_equal_p (XEXP (op0, 1), op1))))
5340 return op2;
5342 /* Convert (!c) != {0,...,0} ? a : b into
5343 c != {0,...,0} ? b : a for vector modes. */
5344 if (VECTOR_MODE_P (GET_MODE (op1))
5345 && GET_CODE (op0) == NE
5346 && GET_CODE (XEXP (op0, 0)) == NOT
5347 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5349 rtx cv = XEXP (op0, 1);
5350 int nunits = CONST_VECTOR_NUNITS (cv);
5351 bool ok = true;
5352 for (int i = 0; i < nunits; ++i)
5353 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5355 ok = false;
5356 break;
5358 if (ok)
5360 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5361 XEXP (XEXP (op0, 0), 0),
5362 XEXP (op0, 1));
5363 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5364 return retval;
5368 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5370 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5371 ? GET_MODE (XEXP (op0, 1))
5372 : GET_MODE (XEXP (op0, 0)));
5373 rtx temp;
5375 /* Look for constant op1/op2 pairs of STORE_FLAG_VALUE and zero, which let us reduce the whole IF_THEN_ELSE to a bare comparison. */
5376 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5378 HOST_WIDE_INT t = INTVAL (op1);
5379 HOST_WIDE_INT f = INTVAL (op2);
5381 if (t == STORE_FLAG_VALUE && f == 0)
5382 code = GET_CODE (op0);
5383 else if (t == 0 && f == STORE_FLAG_VALUE)
5385 enum rtx_code tmp;
5386 tmp = reversed_comparison_code (op0, NULL_RTX);
5387 if (tmp == UNKNOWN)
5388 break;
5389 code = tmp;
5391 else
5392 break;
5394 return simplify_gen_relational (code, mode, cmp_mode,
5395 XEXP (op0, 0), XEXP (op0, 1));
5398 if (cmp_mode == VOIDmode)
5399 cmp_mode = op0_mode;
5400 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5401 cmp_mode, XEXP (op0, 0),
5402 XEXP (op0, 1));
5404 /* See if any simplifications were possible. */
5405 if (temp)
5407 if (CONST_INT_P (temp))
5408 return temp == const0_rtx ? op2 : op1;
5409 else if (temp)
5410 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5413 break;
5415 case VEC_MERGE:
5416 gcc_assert (GET_MODE (op0) == mode);
5417 gcc_assert (GET_MODE (op1) == mode);
5418 gcc_assert (VECTOR_MODE_P (mode));
5419 trueop2 = avoid_constant_pool_reference (op2);
5420 if (CONST_INT_P (trueop2))
5422 int elt_size = GET_MODE_UNIT_SIZE (mode);
5423 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5424 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5425 unsigned HOST_WIDE_INT mask;
5426 if (n_elts == HOST_BITS_PER_WIDE_INT)
5427 mask = -1;
5428 else
5429 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
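	  /* Each bit of the selector chooses between the two operands: a
	     set bit takes the corresponding element from op0 and a clear
	     bit takes it from op1, so an all-zero selector is just op1
	     and an all-ones selector is just op0.  */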
5431 if (!(sel & mask) && !side_effects_p (op0))
5432 return op1;
5433 if ((sel & mask) == mask && !side_effects_p (op1))
5434 return op0;
5436 rtx trueop0 = avoid_constant_pool_reference (op0);
5437 rtx trueop1 = avoid_constant_pool_reference (op1);
5438 if (GET_CODE (trueop0) == CONST_VECTOR
5439 && GET_CODE (trueop1) == CONST_VECTOR)
5441 rtvec v = rtvec_alloc (n_elts);
5442 unsigned int i;
5444 for (i = 0; i < n_elts; i++)
5445 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5446 ? CONST_VECTOR_ELT (trueop0, i)
5447 : CONST_VECTOR_ELT (trueop1, i));
5448 return gen_rtx_CONST_VECTOR (mode, v);
5451 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5452 if no element from a appears in the result. */
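	  /* For example, with four elements, if m is 0b0011 (a supplies
	     elements 0 and 1 of the inner merge) and n is 0b1100 (the
	     outer merge takes elements 2 and 3 from the inner merge),
	     then no element of a survives and the result is
	     (vec_merge b c n).  */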
5453 if (GET_CODE (op0) == VEC_MERGE)
5455 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5456 if (CONST_INT_P (tem))
5458 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5459 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5460 return simplify_gen_ternary (code, mode, mode,
5461 XEXP (op0, 1), op1, op2);
5462 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5463 return simplify_gen_ternary (code, mode, mode,
5464 XEXP (op0, 0), op1, op2);
5467 if (GET_CODE (op1) == VEC_MERGE)
5469 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5470 if (CONST_INT_P (tem))
5472 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5473 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5474 return simplify_gen_ternary (code, mode, mode,
5475 op0, XEXP (op1, 1), op2);
5476 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5477 return simplify_gen_ternary (code, mode, mode,
5478 op0, XEXP (op1, 0), op2);
5482 /* Replace (vec_merge (vec_duplicate (vec_select a (parallel [i]))) a (const_int (1 << i)))
5483 with a. */
5484 if (GET_CODE (op0) == VEC_DUPLICATE
5485 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5486 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5487 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5489 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5490 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5492 if (XEXP (XEXP (op0, 0), 0) == op1
5493 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5494 return op1;
5499 if (rtx_equal_p (op0, op1)
5500 && !side_effects_p (op2) && !side_effects_p (op1))
5501 return op0;
5503 break;
5505 default:
5506 gcc_unreachable ();
5509 return 0;
5512 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5513 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5514 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5516 Works by unpacking OP into a collection of 8-bit values
5517 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5518 and then repacking them again for OUTERMODE. */
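/* For example, a DImode (const_int 0x1122334455667788) accessed as
   (subreg:SI ... 0) on a little-endian target unpacks into eight bytes,
   keeps the four at offset 0, and repacks them as (const_int 0x55667788).  */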
5520 static rtx
5521 simplify_immed_subreg (machine_mode outermode, rtx op,
5522 machine_mode innermode, unsigned int byte)
5524 enum {
5525 value_bit = 8,
5526 value_mask = (1 << value_bit) - 1
5528 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5529 int value_start;
5530 int i;
5531 int elem;
5533 int num_elem;
5534 rtx * elems;
5535 int elem_bitsize;
5536 rtx result_s;
5537 rtvec result_v = NULL;
5538 enum mode_class outer_class;
5539 machine_mode outer_submode;
5540 int max_bitsize;
5542 /* Some ports misuse CCmode. */
5543 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5544 return op;
5546 /* We have no way to represent a complex constant at the rtl level. */
5547 if (COMPLEX_MODE_P (outermode))
5548 return NULL_RTX;
5550 /* We support any size mode. */
5551 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5552 GET_MODE_BITSIZE (innermode));
5554 /* Unpack the value. */
5556 if (GET_CODE (op) == CONST_VECTOR)
5558 num_elem = CONST_VECTOR_NUNITS (op);
5559 elems = &CONST_VECTOR_ELT (op, 0);
5560 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5562 else
5564 num_elem = 1;
5565 elems = &op;
5566 elem_bitsize = max_bitsize;
5568 /* If this asserts, it is too complicated; reducing value_bit may help. */
5569 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5570 /* I don't know how to handle endianness of sub-units. */
5571 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5573 for (elem = 0; elem < num_elem; elem++)
5575 unsigned char * vp;
5576 rtx el = elems[elem];
5578 /* Vectors are kept in target memory order. (This is probably
5579 a mistake.) */
5581 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5582 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5583 / BITS_PER_UNIT);
5584 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5585 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5586 unsigned bytele = (subword_byte % UNITS_PER_WORD
5587 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5588 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5591 switch (GET_CODE (el))
5593 case CONST_INT:
5594 for (i = 0;
5595 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5596 i += value_bit)
5597 *vp++ = INTVAL (el) >> i;
5598 /* CONST_INTs are always logically sign-extended. */
5599 for (; i < elem_bitsize; i += value_bit)
5600 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5601 break;
5603 case CONST_WIDE_INT:
5605 rtx_mode_t val = std::make_pair (el, innermode);
5606 unsigned char extend = wi::sign_mask (val);
5608 for (i = 0; i < elem_bitsize; i += value_bit)
5609 *vp++ = wi::extract_uhwi (val, i, value_bit);
5610 for (; i < elem_bitsize; i += value_bit)
5611 *vp++ = extend;
5613 break;
5615 case CONST_DOUBLE:
5616 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5618 unsigned char extend = 0;
5619 /* If this triggers, someone should have generated a
5620 CONST_INT instead. */
5621 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5623 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5624 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5625 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5627 *vp++
5628 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5629 i += value_bit;
5632 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5633 extend = -1;
5634 for (; i < elem_bitsize; i += value_bit)
5635 *vp++ = extend;
5637 else
5639 /* This is big enough for anything on the platform. */
5640 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5641 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5643 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5644 gcc_assert (bitsize <= elem_bitsize);
5645 gcc_assert (bitsize % value_bit == 0);
5647 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5648 GET_MODE (el));
5650 /* real_to_target produces its result in words affected by
5651 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5652 and use WORDS_BIG_ENDIAN instead; see the documentation
5653 of SUBREG in rtl.texi. */
5654 for (i = 0; i < bitsize; i += value_bit)
5656 int ibase;
5657 if (WORDS_BIG_ENDIAN)
5658 ibase = bitsize - 1 - i;
5659 else
5660 ibase = i;
5661 *vp++ = tmp[ibase / 32] >> i % 32;
5664 /* It shouldn't matter what's done here, so fill it with
5665 zero. */
5666 for (; i < elem_bitsize; i += value_bit)
5667 *vp++ = 0;
5669 break;
5671 case CONST_FIXED:
5672 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5674 for (i = 0; i < elem_bitsize; i += value_bit)
5675 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5677 else
5679 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5680 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5681 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5682 i += value_bit)
5683 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5684 >> (i - HOST_BITS_PER_WIDE_INT);
5685 for (; i < elem_bitsize; i += value_bit)
5686 *vp++ = 0;
5688 break;
5690 default:
5691 gcc_unreachable ();
5695 /* Now, pick the right byte to start with. */
5696 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5697 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5698 will already have offset 0. */
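/* For example, with DImode inside and SImode outside, BYTE == 4 on a
   big-endian target names the memory bytes that hold the least
   significant word, so it is renumbered to 0 here.  */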
5699 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5701 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5702 - byte);
5703 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5704 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5705 byte = (subword_byte % UNITS_PER_WORD
5706 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5709 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5710 so if it's become negative it will instead be very large.) */
5711 gcc_assert (byte < GET_MODE_SIZE (innermode));
5713 /* Convert from bytes to chunks of size value_bit. */
5714 value_start = byte * (BITS_PER_UNIT / value_bit);
5716 /* Re-pack the value. */
5717 num_elem = GET_MODE_NUNITS (outermode);
5719 if (VECTOR_MODE_P (outermode))
5721 result_v = rtvec_alloc (num_elem);
5722 elems = &RTVEC_ELT (result_v, 0);
5724 else
5725 elems = &result_s;
5727 outer_submode = GET_MODE_INNER (outermode);
5728 outer_class = GET_MODE_CLASS (outer_submode);
5729 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5731 gcc_assert (elem_bitsize % value_bit == 0);
5732 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5734 for (elem = 0; elem < num_elem; elem++)
5736 unsigned char *vp;
5738 /* Vectors are stored in target memory order. (This is probably
5739 a mistake.) */
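/* BYTELE below is computed exactly as in the unpack loop above.  */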
5741 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5742 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5743 / BITS_PER_UNIT);
5744 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5745 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5746 unsigned bytele = (subword_byte % UNITS_PER_WORD
5747 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5748 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5751 switch (outer_class)
5753 case MODE_INT:
5754 case MODE_PARTIAL_INT:
5756 int u;
5757 int base = 0;
5758 int units
5759 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5760 / HOST_BITS_PER_WIDE_INT;
5761 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5762 wide_int r;
5764 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5765 return NULL_RTX;
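/* Gather the value_bit-sized chunks back into HOST_WIDE_INT-sized
   words, least significant first, and then form a wide_int with the
   submode's precision from those words.  */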
5766 for (u = 0; u < units; u++)
5768 unsigned HOST_WIDE_INT buf = 0;
5769 for (i = 0;
5770 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5771 i += value_bit)
5772 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5774 tmp[u] = buf;
5775 base += HOST_BITS_PER_WIDE_INT;
5777 r = wide_int::from_array (tmp, units,
5778 GET_MODE_PRECISION (outer_submode));
5779 #if TARGET_SUPPORTS_WIDE_INT == 0
5780 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5781 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5782 return NULL_RTX;
5783 #endif
5784 elems[elem] = immed_wide_int_const (r, outer_submode);
5786 break;
5788 case MODE_FLOAT:
5789 case MODE_DECIMAL_FLOAT:
5791 REAL_VALUE_TYPE r;
5792 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5794 /* real_from_target wants its input in words affected by
5795 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5796 and use WORDS_BIG_ENDIAN instead; see the documentation
5797 of SUBREG in rtl.texi. */
5798 for (i = 0; i < max_bitsize / 32; i++)
5799 tmp[i] = 0;
5800 for (i = 0; i < elem_bitsize; i += value_bit)
5802 int ibase;
5803 if (WORDS_BIG_ENDIAN)
5804 ibase = elem_bitsize - 1 - i;
5805 else
5806 ibase = i;
5807 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5810 real_from_target (&r, tmp, outer_submode);
5811 elems[elem] = const_double_from_real_value (r, outer_submode);
5813 break;
5815 case MODE_FRACT:
5816 case MODE_UFRACT:
5817 case MODE_ACCUM:
5818 case MODE_UACCUM:
5820 FIXED_VALUE_TYPE f;
5821 f.data.low = 0;
5822 f.data.high = 0;
5823 f.mode = outer_submode;
5825 for (i = 0;
5826 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5827 i += value_bit)
5828 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5829 for (; i < elem_bitsize; i += value_bit)
5830 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5831 << (i - HOST_BITS_PER_WIDE_INT));
5833 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5835 break;
5837 default:
5838 gcc_unreachable ();
5841 if (VECTOR_MODE_P (outermode))
5842 return gen_rtx_CONST_VECTOR (outermode, result_v);
5843 else
5844 return result_s;
5847 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5848 Return 0 if no simplifications are possible. */
5849 rtx
5850 simplify_subreg (machine_mode outermode, rtx op,
5851 machine_mode innermode, unsigned int byte)
5853 /* Little bit of sanity checking. */
5854 gcc_assert (innermode != VOIDmode);
5855 gcc_assert (outermode != VOIDmode);
5856 gcc_assert (innermode != BLKmode);
5857 gcc_assert (outermode != BLKmode);
5859 gcc_assert (GET_MODE (op) == innermode
5860 || GET_MODE (op) == VOIDmode);
5862 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5863 return NULL_RTX;
5865 if (byte >= GET_MODE_SIZE (innermode))
5866 return NULL_RTX;
5868 if (outermode == innermode && !byte)
5869 return op;
5871 if (CONST_SCALAR_INT_P (op)
5872 || CONST_DOUBLE_AS_FLOAT_P (op)
5873 || GET_CODE (op) == CONST_FIXED
5874 || GET_CODE (op) == CONST_VECTOR)
5875 return simplify_immed_subreg (outermode, op, innermode, byte);
5877 /* Changing mode twice with SUBREG => just change it once,
5878 or not at all if changing back to the starting mode. */
5879 if (GET_CODE (op) == SUBREG)
5881 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5882 int final_offset = byte + SUBREG_BYTE (op);
5883 rtx newx;
5885 if (outermode == innermostmode
5886 && byte == 0 && SUBREG_BYTE (op) == 0)
5887 return SUBREG_REG (op);
5889 /* SUBREG_BYTE represents the offset, as if the value were stored
5890 in memory. The irritating exception is a paradoxical subreg, where
5891 we define SUBREG_BYTE to be 0; on big-endian machines this
5892 value should really be negative. For a moment, undo this exception. */
5893 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5895 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5896 if (WORDS_BIG_ENDIAN)
5897 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5898 if (BYTES_BIG_ENDIAN)
5899 final_offset += difference % UNITS_PER_WORD;
5901 if (SUBREG_BYTE (op) == 0
5902 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5904 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5905 if (WORDS_BIG_ENDIAN)
5906 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5907 if (BYTES_BIG_ENDIAN)
5908 final_offset += difference % UNITS_PER_WORD;
5911 /* See whether resulting subreg will be paradoxical. */
5912 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5914 /* In nonparadoxical subregs we can't handle negative offsets. */
5915 if (final_offset < 0)
5916 return NULL_RTX;
5917 /* Bail out in case resulting subreg would be incorrect. */
5918 if (final_offset % GET_MODE_SIZE (outermode)
5919 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5920 return NULL_RTX;
5922 else
5924 int offset = 0;
5925 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5927 /* In a paradoxical subreg, see whether we are still looking at the
5928 lower part. If so, our SUBREG_BYTE will be 0. */
5929 if (WORDS_BIG_ENDIAN)
5930 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5931 if (BYTES_BIG_ENDIAN)
5932 offset += difference % UNITS_PER_WORD;
5933 if (offset == final_offset)
5934 final_offset = 0;
5935 else
5936 return NULL_RTX;
5939 /* Recurse for further possible simplifications. */
5940 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5941 final_offset);
5942 if (newx)
5943 return newx;
5944 if (validate_subreg (outermode, innermostmode,
5945 SUBREG_REG (op), final_offset))
5947 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5948 if (SUBREG_PROMOTED_VAR_P (op)
5949 && SUBREG_PROMOTED_SIGN (op) >= 0
5950 && GET_MODE_CLASS (outermode) == MODE_INT
5951 && IN_RANGE (GET_MODE_SIZE (outermode),
5952 GET_MODE_SIZE (innermode),
5953 GET_MODE_SIZE (innermostmode))
5954 && subreg_lowpart_p (newx))
5956 SUBREG_PROMOTED_VAR_P (newx) = 1;
5957 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5959 return newx;
5961 return NULL_RTX;
5964 /* SUBREG of a hard register => just change the register number
5965 and/or mode. If the hard register is not valid in that mode,
5966 suppress this simplification. If the hard register is the stack,
5967 frame, or argument pointer, leave this as a SUBREG. */
5969 if (REG_P (op) && HARD_REGISTER_P (op))
5971 unsigned int regno, final_regno;
5973 regno = REGNO (op);
5974 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5975 if (HARD_REGISTER_NUM_P (final_regno))
5977 rtx x;
5978 int final_offset = byte;
5980 /* Adjust offset for paradoxical subregs. */
5981 if (byte == 0
5982 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5984 int difference = (GET_MODE_SIZE (innermode)
5985 - GET_MODE_SIZE (outermode));
5986 if (WORDS_BIG_ENDIAN)
5987 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5988 if (BYTES_BIG_ENDIAN)
5989 final_offset += difference % UNITS_PER_WORD;
5992 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5994 /* Propagate the original regno. We don't have any way to specify
5995 an offset inside the original regno, so do so only for the lowpart.
5996 The information is used only by alias analysis, which cannot
5997 grok partial registers anyway. */
5999 if (subreg_lowpart_offset (outermode, innermode) == byte)
6000 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6001 return x;
6005 /* If we have a SUBREG of a register that we are replacing and we are
6006 replacing it with a MEM, make a new MEM and try replacing the
6007 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6008 or if we would be widening it. */
6010 if (MEM_P (op)
6011 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6012 /* Allow splitting of volatile memory references in case we don't
6013 have an instruction to move the whole thing. */
6014 && (! MEM_VOLATILE_P (op)
6015 || ! have_insn_for (SET, innermode))
6016 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6017 return adjust_address_nv (op, outermode, byte);
6019 /* Handle complex values represented as CONCAT
6020 of real and imaginary parts. */
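/* For example, if SFmode is 4 bytes wide, (subreg:SF (concat:SC X Y) 0)
   simplifies to X and (subreg:SF (concat:SC X Y) 4) simplifies to Y.  */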
6021 if (GET_CODE (op) == CONCAT)
6023 unsigned int part_size, final_offset;
6024 rtx part, res;
6026 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6027 if (byte < part_size)
6029 part = XEXP (op, 0);
6030 final_offset = byte;
6032 else
6034 part = XEXP (op, 1);
6035 final_offset = byte - part_size;
6038 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6039 return NULL_RTX;
6041 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6042 if (res)
6043 return res;
6044 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6045 return gen_rtx_SUBREG (outermode, part, final_offset);
6046 return NULL_RTX;
6049 /* A SUBREG resulting from a zero extension may fold to zero if
6050 it extracts higher bits than the ZERO_EXTEND's source provides. */
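/* For example, on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:QI R)) 4) selects bits 32..63 of the
   zero extension, all of which are known to be zero, so it folds to
   (const_int 0).  */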
6051 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6053 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6054 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6055 return CONST0_RTX (outermode);
6058 if (SCALAR_INT_MODE_P (outermode)
6059 && SCALAR_INT_MODE_P (innermode)
6060 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6061 && byte == subreg_lowpart_offset (outermode, innermode))
6063 rtx tem = simplify_truncation (outermode, op, innermode);
6064 if (tem)
6065 return tem;
6068 return NULL_RTX;
6071 /* Make a SUBREG operation or equivalent if it folds. */
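/* For example, on a little-endian target
   simplify_gen_subreg (QImode, GEN_INT (0x1234), HImode, 0) folds to
   (const_int 0x34); when the operand cannot be folded, a plain SUBREG
   is returned if validate_subreg allows one, and NULL_RTX otherwise.  */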
6073 rtx
6074 simplify_gen_subreg (machine_mode outermode, rtx op,
6075 machine_mode innermode, unsigned int byte)
6077 rtx newx;
6079 newx = simplify_subreg (outermode, op, innermode, byte);
6080 if (newx)
6081 return newx;
6083 if (GET_CODE (op) == SUBREG
6084 || GET_CODE (op) == CONCAT
6085 || GET_MODE (op) == VOIDmode)
6086 return NULL_RTX;
6088 if (validate_subreg (outermode, innermode, op, byte))
6089 return gen_rtx_SUBREG (outermode, op, byte);
6091 return NULL_RTX;
6094 /* Generate a SUBREG that extracts the least significant part of EXPR
6095 (in mode INNER_MODE) as a value of mode OUTER_MODE. */
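/* A sketch: with a DImode pseudo register REG,
   lowpart_subreg (SImode, REG, DImode) gives (subreg:SI REG 0) on a
   little-endian target and (subreg:SI REG 4) on a big-endian one, since
   subreg_lowpart_offset accounts for endianness.  */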
6097 rtx
6098 lowpart_subreg (machine_mode outer_mode, rtx expr,
6099 machine_mode inner_mode)
6101 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6102 subreg_lowpart_offset (outer_mode, inner_mode));
6105 /* Simplify X, an rtx expression.
6107 Return the simplified expression or NULL if no simplifications
6108 were possible.
6110 This is the preferred entry point into the simplification routines;
6111 however, we still allow passes to call the more specific routines.
6113 Right now GCC has three (yes, three) major bodies of RTL simplification
6114 code that need to be unified.
6116 1. fold_rtx in cse.c. This code uses various CSE specific
6117 information to aid in RTL simplification.
6119 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6120 it uses combine specific information to aid in RTL
6121 simplification.
6123 3. The routines in this file.
6126 Long term we want to only have one body of simplification code; to
6127 get to that state I recommend the following steps:
6129 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6130 which do not depend on pass-specific state into these routines.
6132 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6133 use this routine whenever possible.
6135 3. Allow for pass dependent state to be provided to these
6136 routines and add simplifications based on the pass dependent
6137 state. Remove code from cse.c & combine.c that becomes
6138 redundant/dead.
6140 It will take time, but ultimately the compiler will be easier to
6141 maintain and improve. It's totally silly that when we add a
6142 simplification it needs to be added to 4 places (3 for RTL
6143 simplification and 1 for tree simplification). */
6145 rtx
6146 simplify_rtx (const_rtx x)
6148 const enum rtx_code code = GET_CODE (x);
6149 const machine_mode mode = GET_MODE (x);
6151 switch (GET_RTX_CLASS (code))
6153 case RTX_UNARY:
6154 return simplify_unary_operation (code, mode,
6155 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6156 case RTX_COMM_ARITH:
6157 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6158 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6160 /* Fall through. */
6162 case RTX_BIN_ARITH:
6163 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6165 case RTX_TERNARY:
6166 case RTX_BITFIELD_OPS:
6167 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6168 XEXP (x, 0), XEXP (x, 1),
6169 XEXP (x, 2));
6171 case RTX_COMPARE:
6172 case RTX_COMM_COMPARE:
6173 return simplify_relational_operation (code, mode,
6174 ((GET_MODE (XEXP (x, 0))
6175 != VOIDmode)
6176 ? GET_MODE (XEXP (x, 0))
6177 : GET_MODE (XEXP (x, 1))),
6178 XEXP (x, 0),
6179 XEXP (x, 1));
6181 case RTX_EXTRA:
6182 if (code == SUBREG)
6183 return simplify_subreg (mode, SUBREG_REG (x),
6184 GET_MODE (SUBREG_REG (x)),
6185 SUBREG_BYTE (x));
6186 break;
6188 case RTX_OBJ:
6189 if (code == LO_SUM)
6191 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6192 if (GET_CODE (XEXP (x, 0)) == HIGH
6193 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6194 return XEXP (x, 1);
6196 break;
6198 default:
6199 break;
6201 return NULL;