gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
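/* Illustrative note added for clarity (not part of the original source):
   HWI_SIGN_EXTEND gives the value the high word of a (low, high) pair
   must take for the pair to represent LOW sign-extended.  Assuming a
   64-bit HOST_WIDE_INT:

     HWI_SIGN_EXTEND (42)  == 0    since 42 is non-negative;
     HWI_SIGN_EXTEND (-7)  == -1   since -7 is negative, so the high
                                   word must be all ones.  */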
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
66 /* Test whether expression, X, is an immediate constant that represents
67 the most significant bit of machine mode MODE. */
69 bool
70 mode_signbit_p (machine_mode mode, const_rtx x)
72 unsigned HOST_WIDE_INT val;
73 unsigned int width;
75 if (GET_MODE_CLASS (mode) != MODE_INT)
76 return false;
78 width = GET_MODE_PRECISION (mode);
79 if (width == 0)
80 return false;
82 if (width <= HOST_BITS_PER_WIDE_INT
83 && CONST_INT_P (x))
84 val = INTVAL (x);
85 #if TARGET_SUPPORTS_WIDE_INT
86 else if (CONST_WIDE_INT_P (x))
88 unsigned int i;
89 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
90 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
91 return false;
92 for (i = 0; i < elts - 1; i++)
93 if (CONST_WIDE_INT_ELT (x, i) != 0)
94 return false;
95 val = CONST_WIDE_INT_ELT (x, elts - 1);
96 width %= HOST_BITS_PER_WIDE_INT;
97 if (width == 0)
98 width = HOST_BITS_PER_WIDE_INT;
100 #else
101 else if (width <= HOST_BITS_PER_DOUBLE_INT
102 && CONST_DOUBLE_AS_INT_P (x)
103 && CONST_DOUBLE_LOW (x) == 0)
105 val = CONST_DOUBLE_HIGH (x);
106 width -= HOST_BITS_PER_WIDE_INT;
108 #endif
109 else
110 /* X is not an integer constant. */
111 return false;
113 if (width < HOST_BITS_PER_WIDE_INT)
114 val &= (HOST_WIDE_INT_1U << width) - 1;
115 return val == (HOST_WIDE_INT_1U << (width - 1));
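/* Illustrative note added for clarity (not part of the original source):
   on a target where SImode has precision 32,

     mode_signbit_p (SImode, GEN_INT (0x80000000))  returns true
     mode_signbit_p (SImode, GEN_INT (0x40000000))  returns false
     mode_signbit_p (SImode, constm1_rtx)           returns false

   i.e. the predicate accepts exactly the constant whose only set bit
   (within the mode) is the sign bit.  */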
118 /* Test whether VAL is equal to the most significant bit of mode MODE
119 (after masking with the mode mask of MODE). Returns false if the
120 precision of MODE is too large to handle. */
122 bool
123 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
125 unsigned int width;
127 if (GET_MODE_CLASS (mode) != MODE_INT)
128 return false;
130 width = GET_MODE_PRECISION (mode);
131 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
132 return false;
134 val &= GET_MODE_MASK (mode);
135 return val == (HOST_WIDE_INT_1U << (width - 1));
138 /* Test whether the most significant bit of mode MODE is set in VAL.
139 Returns false if the precision of MODE is too large to handle. */
140 bool
141 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
143 unsigned int width;
145 if (GET_MODE_CLASS (mode) != MODE_INT)
146 return false;
148 width = GET_MODE_PRECISION (mode);
149 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
150 return false;
152 val &= HOST_WIDE_INT_1U << (width - 1);
153 return val != 0;
156 /* Test whether the most significant bit of mode MODE is clear in VAL.
157 Returns false if the precision of MODE is too large to handle. */
158 bool
159 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
161 unsigned int width;
163 if (GET_MODE_CLASS (mode) != MODE_INT)
164 return false;
166 width = GET_MODE_PRECISION (mode);
167 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
168 return false;
170 val &= HOST_WIDE_INT_1U << (width - 1);
171 return val == 0;
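/* Illustrative note added for clarity (not part of the original source),
   again assuming SImode has precision 32:

     val_signbit_p             (SImode, 0x80000000)  -> true
     val_signbit_p             (SImode, 0xffffffff)  -> false (other bits set)
     val_signbit_known_set_p   (SImode, 0xffffffff)  -> true  (bit 31 is set)
     val_signbit_known_clear_p (SImode, 0x7fffffff)  -> true  (bit 31 is clear)  */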
174 /* Make a binary operation by properly ordering the operands and
175 seeing if the expression folds. */
177 rtx
178 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
179 rtx op1)
181 rtx tem;
183 /* If this simplifies, do it. */
184 tem = simplify_binary_operation (code, mode, op0, op1);
185 if (tem)
186 return tem;
188 /* Put complex operands first and constants second if commutative. */
189 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
190 && swap_commutative_operands_p (op0, op1))
191 std::swap (op0, op1);
193 return gen_rtx_fmt_ee (code, mode, op0, op1);
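/* Usage sketch added for illustration (not part of the original source);
   the SImode choice and hard register number 0 are arbitrary.  It shows
   the two behaviours of simplify_gen_binary: fold when possible,
   otherwise build the rtx with commutative operands in canonical order.  */
#if 0
rtx reg = gen_rtx_REG (SImode, 0);

rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
/* SUM is (const_int 5); the constant operation folded.  */

rtx expr = simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg);
/* EXPR is (plus:SI (reg:SI 0) (const_int 2)); no folding was possible,
   so the complex operand was put first and the constant second.  */
#endif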
196 /* If X is a MEM referencing the constant pool, return the real value.
197 Otherwise return X. */
198 rtx
199 avoid_constant_pool_reference (rtx x)
201 rtx c, tmp, addr;
202 machine_mode cmode;
203 HOST_WIDE_INT offset = 0;
205 switch (GET_CODE (x))
207 case MEM:
208 break;
210 case FLOAT_EXTEND:
211 /* Handle float extensions of constant pool references. */
212 tmp = XEXP (x, 0);
213 c = avoid_constant_pool_reference (tmp);
214 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
215 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
216 GET_MODE (x));
217 return x;
219 default:
220 return x;
223 if (GET_MODE (x) == BLKmode)
224 return x;
226 addr = XEXP (x, 0);
228 /* Call target hook to avoid the effects of -fpic etc.... */
229 addr = targetm.delegitimize_address (addr);
231 /* Split the address into a base and integer offset. */
232 if (GET_CODE (addr) == CONST
233 && GET_CODE (XEXP (addr, 0)) == PLUS
234 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
236 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
237 addr = XEXP (XEXP (addr, 0), 0);
240 if (GET_CODE (addr) == LO_SUM)
241 addr = XEXP (addr, 1);
243 /* If this is a constant pool reference, we can turn it into its
244 constant and hope that simplifications happen. */
245 if (GET_CODE (addr) == SYMBOL_REF
246 && CONSTANT_POOL_ADDRESS_P (addr))
248 c = get_pool_constant (addr);
249 cmode = get_pool_mode (addr);
251 /* If we're accessing the constant in a different mode than it was
252 originally stored, attempt to fix that up via subreg simplifications.
253 If that fails we have no choice but to return the original memory. */
254 if (offset == 0 && cmode == GET_MODE (x))
255 return c;
256 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
258 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
259 if (tem && CONSTANT_P (tem))
260 return tem;
264 return x;
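/* Illustrative note added for clarity (not part of the original source;
   the pool label .LC0 below is hypothetical): given a reference such as

     (mem/u:DF (symbol_ref/u:DI ("*.LC0")))

   where .LC0 is a constant-pool entry holding the DFmode constant 1.0,
   the function above returns (const_double:DF 1.0e+0).  If the MEM reads
   the entry in a different mode or at a nonzero offset, simplify_subreg
   is used to extract the referenced piece, and the original MEM is
   returned unchanged if that fails.  */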
267 /* Simplify a MEM based on its attributes. This is the default
268 delegitimize_address target hook, and it's recommended that every
269 overrider call it. */
271 rtx
272 delegitimize_mem_from_attrs (rtx x)
274 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
275 use their base addresses as equivalent. */
276 if (MEM_P (x)
277 && MEM_EXPR (x)
278 && MEM_OFFSET_KNOWN_P (x))
280 tree decl = MEM_EXPR (x);
281 machine_mode mode = GET_MODE (x);
282 HOST_WIDE_INT offset = 0;
284 switch (TREE_CODE (decl))
286 default:
287 decl = NULL;
288 break;
290 case VAR_DECL:
291 break;
293 case ARRAY_REF:
294 case ARRAY_RANGE_REF:
295 case COMPONENT_REF:
296 case BIT_FIELD_REF:
297 case REALPART_EXPR:
298 case IMAGPART_EXPR:
299 case VIEW_CONVERT_EXPR:
301 HOST_WIDE_INT bitsize, bitpos;
302 tree toffset;
303 int unsignedp, reversep, volatilep = 0;
305 decl
306 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
307 &unsignedp, &reversep, &volatilep);
308 if (bitsize != GET_MODE_BITSIZE (mode)
309 || (bitpos % BITS_PER_UNIT)
310 || (toffset && !tree_fits_shwi_p (toffset)))
311 decl = NULL;
312 else
314 offset += bitpos / BITS_PER_UNIT;
315 if (toffset)
316 offset += tree_to_shwi (toffset);
318 break;
322 if (decl
323 && mode == GET_MODE (x)
324 && TREE_CODE (decl) == VAR_DECL
325 && (TREE_STATIC (decl)
326 || DECL_THREAD_LOCAL_P (decl))
327 && DECL_RTL_SET_P (decl)
328 && MEM_P (DECL_RTL (decl)))
330 rtx newx;
332 offset += MEM_OFFSET (x);
334 newx = DECL_RTL (decl);
336 if (MEM_P (newx))
338 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
340 /* Avoid creating a new MEM needlessly if we already had
341 the same address. We do if there's no OFFSET and the
342 old address X is identical to NEWX, or if X is of the
343 form (plus NEWX OFFSET), or the NEWX is of the form
344 (plus Y (const_int Z)) and X is that with the offset
345 added: (plus Y (const_int Z+OFFSET)). */
346 if (!((offset == 0
347 || (GET_CODE (o) == PLUS
348 && GET_CODE (XEXP (o, 1)) == CONST_INT
349 && (offset == INTVAL (XEXP (o, 1))
350 || (GET_CODE (n) == PLUS
351 && GET_CODE (XEXP (n, 1)) == CONST_INT
352 && (INTVAL (XEXP (n, 1)) + offset
353 == INTVAL (XEXP (o, 1)))
354 && (n = XEXP (n, 0))))
355 && (o = XEXP (o, 0))))
356 && rtx_equal_p (o, n)))
357 x = adjust_address_nv (newx, mode, offset);
359 else if (GET_MODE (x) == GET_MODE (newx)
360 && offset == 0)
361 x = newx;
365 return x;
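/* Illustrative note added for clarity (not part of the original source):
   if X is, say, a MEM whose address was rewritten by -fpic legitimization
   into a form the rest of the compiler cannot see through, but whose
   MEM_EXPR still records a static VAR_DECL (possibly with a component
   offset), the function above rebuilds an equivalent MEM directly from
   the variable's DECL_RTL plus the recorded offset.  */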
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
371 rtx
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 machine_mode op_mode)
375 rtx tem;
377 /* If this simplifies, use it. */
378 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379 return tem;
381 return gen_rtx_fmt_e (code, mode, op);
384 /* Likewise for ternary operations. */
386 rtx
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
390 rtx tem;
392 /* If this simplifies, use it. */
393 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
394 op0, op1, op2)))
395 return tem;
397 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
400 /* Likewise, for relational operations.
401 CMP_MODE specifies mode comparison is done in. */
403 rtx
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 machine_mode cmp_mode, rtx op0, rtx op1)
407 rtx tem;
409 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
410 op0, op1)))
411 return tem;
413 return gen_rtx_fmt_ee (code, mode, op0, op1);
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X, if it returns non-NULL, replace X with its return value and simplify the
419 result. */
421 rtx
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 rtx (*fn) (rtx, const_rtx, void *), void *data)
425 enum rtx_code code = GET_CODE (x);
426 machine_mode mode = GET_MODE (x);
427 machine_mode op_mode;
428 const char *fmt;
429 rtx op0, op1, op2, newx, op;
430 rtvec vec, newvec;
431 int i, j;
433 if (__builtin_expect (fn != NULL, 0))
435 newx = fn (x, old_rtx, data);
436 if (newx)
437 return newx;
439 else if (rtx_equal_p (x, old_rtx))
440 return copy_rtx ((rtx) data);
442 switch (GET_RTX_CLASS (code))
444 case RTX_UNARY:
445 op0 = XEXP (x, 0);
446 op_mode = GET_MODE (op0);
447 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448 if (op0 == XEXP (x, 0))
449 return x;
450 return simplify_gen_unary (code, mode, op0, op_mode);
452 case RTX_BIN_ARITH:
453 case RTX_COMM_ARITH:
454 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_binary (code, mode, op0, op1);
460 case RTX_COMPARE:
461 case RTX_COMM_COMPARE:
462 op0 = XEXP (x, 0);
463 op1 = XEXP (x, 1);
464 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 return x;
469 return simplify_gen_relational (code, mode, op_mode, op0, op1);
471 case RTX_TERNARY:
472 case RTX_BITFIELD_OPS:
473 op0 = XEXP (x, 0);
474 op_mode = GET_MODE (op0);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 return x;
480 if (op_mode == VOIDmode)
481 op_mode = GET_MODE (op0);
482 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
484 case RTX_EXTRA:
485 if (code == SUBREG)
487 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 if (op0 == SUBREG_REG (x))
489 return x;
490 op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 GET_MODE (SUBREG_REG (x)),
492 SUBREG_BYTE (x));
493 return op0 ? op0 : x;
495 break;
497 case RTX_OBJ:
498 if (code == MEM)
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0))
502 return x;
503 return replace_equiv_address_nv (x, op0);
505 else if (code == LO_SUM)
507 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0) == HIGH)
513 rtx base0, base1, offset0, offset1;
514 split_const (XEXP (op0, 0), &base0, &offset0);
515 split_const (op1, &base1, &offset1);
516 if (rtx_equal_p (base0, base1))
517 return op1;
520 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 return x;
522 return gen_rtx_LO_SUM (mode, op0, op1);
524 break;
526 default:
527 break;
530 newx = x;
531 fmt = GET_RTX_FORMAT (code);
532 for (i = 0; fmt[i]; i++)
533 switch (fmt[i])
535 case 'E':
536 vec = XVEC (x, i);
537 newvec = XVEC (newx, i);
538 for (j = 0; j < GET_NUM_ELEM (vec); j++)
540 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 old_rtx, fn, data);
542 if (op != RTVEC_ELT (vec, j))
544 if (newvec == vec)
546 newvec = shallow_copy_rtvec (vec);
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XVEC (newx, i) = newvec;
551 RTVEC_ELT (newvec, j) = op;
554 break;
556 case 'e':
557 if (XEXP (x, i))
559 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 if (op != XEXP (x, i))
562 if (x == newx)
563 newx = shallow_copy_rtx (x);
564 XEXP (newx, i) = op;
567 break;
569 return newx;
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
575 rtx
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
578 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
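/* Usage sketch added for illustration (not part of the original source);
   modes and register numbers are arbitrary.  */
#if 0
rtx r0 = gen_rtx_REG (SImode, 0);
rtx r1 = gen_rtx_REG (SImode, 1);
rtx x = gen_rtx_PLUS (SImode, r0, gen_rtx_MULT (SImode, r1, const0_rtx));
rtx y = simplify_replace_rtx (x, r1, GEN_INT (4));
/* Every occurrence of R1 is replaced by (const_int 4) and the result is
   resimplified: (mult:SI (const_int 4) (const_int 0)) folds to zero and
   (plus:SI (reg:SI 0) (const_int 0)) folds to (reg:SI 0), so Y is R0.  */
#endif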
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
584 RTL provides two ways of truncating a value:
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
589 an rvalue.
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
597 2. a TRUNCATE. This form handles both scalar and compound integers.
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
604 simplify_gen_unary (TRUNCATE, ...)
606 and leave simplify_unary_operation to work out which representation
607 should be used.
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
613 truncation of:
615 (and:DI X Y)
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
622 (and:DI (reg:DI X) (const_int 63))
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 machine_mode op_mode)
632 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634 gcc_assert (precision <= op_precision);
636 /* Optimize truncations of zero and sign extended values. */
637 if (GET_CODE (op) == ZERO_EXTEND
638 || GET_CODE (op) == SIGN_EXTEND)
640 /* There are three possibilities. If MODE is the same as the
641 origmode, we can omit both the extension and the subreg.
642 If MODE is not larger than the origmode, we can apply the
643 truncation without the extension. Finally, if the outermode
644 is larger than the origmode, we can just extend to the appropriate
645 mode. */
646 machine_mode origmode = GET_MODE (XEXP (op, 0));
647 if (mode == origmode)
648 return XEXP (op, 0);
649 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
650 return simplify_gen_unary (TRUNCATE, mode,
651 XEXP (op, 0), origmode);
652 else
653 return simplify_gen_unary (GET_CODE (op), mode,
654 XEXP (op, 0), origmode);
657 /* If the machine can perform operations in the truncated mode, distribute
658 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
659 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
660 if (1
661 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
662 && (GET_CODE (op) == PLUS
663 || GET_CODE (op) == MINUS
664 || GET_CODE (op) == MULT))
666 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
667 if (op0)
669 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
670 if (op1)
671 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
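/* Illustrative note added for clarity (not part of the original source):
   on a little-endian target where truncation from SImode to QImode needs
   no real instruction, the distribution above turns

     (truncate:QI (plus:SI (reg:SI 0) (const_int 1)))

   into

     (plus:QI (subreg:QI (reg:SI 0) 0) (const_int 1))

   i.e. each operand is truncated on its own (the register via a lowpart
   subreg, the constant by masking) and the addition is redone in QImode.  */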
675 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
676 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op) == LSHIFTRT
679 || GET_CODE (op) == ASHIFTRT)
680 /* Ensure that OP_MODE is at least twice as wide as MODE
681 to avoid the possibility that an outer LSHIFTRT shifts by more
682 than the sign extension's sign_bit_copies and introduces zeros
683 into the high bits of the result. */
684 && 2 * precision <= op_precision
685 && CONST_INT_P (XEXP (op, 1))
686 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
687 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
688 && UINTVAL (XEXP (op, 1)) < precision)
689 return simplify_gen_binary (ASHIFTRT, mode,
690 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
692 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
693 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
694 the outer subreg is effectively a truncation to the original mode. */
695 if ((GET_CODE (op) == LSHIFTRT
696 || GET_CODE (op) == ASHIFTRT)
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (LSHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
705 (ashift:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if (GET_CODE (op) == ASHIFT
708 && CONST_INT_P (XEXP (op, 1))
709 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
710 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (ASHIFT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
717 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
718 and C2. */
719 if (GET_CODE (op) == AND
720 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
721 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
722 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
723 && CONST_INT_P (XEXP (op, 1)))
725 rtx op0 = (XEXP (XEXP (op, 0), 0));
726 rtx shift_op = XEXP (XEXP (op, 0), 1);
727 rtx mask_op = XEXP (op, 1);
728 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
729 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
731 if (shift < precision
732 /* If doing this transform works for an X with all bits set,
733 it works for any X. */
734 && ((GET_MODE_MASK (mode) >> shift) & mask)
735 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
736 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
737 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
739 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
740 return simplify_gen_binary (AND, mode, op0, mask_op);
744 /* Recognize a word extraction from a multi-word subreg. */
745 if ((GET_CODE (op) == LSHIFTRT
746 || GET_CODE (op) == ASHIFTRT)
747 && SCALAR_INT_MODE_P (mode)
748 && SCALAR_INT_MODE_P (op_mode)
749 && precision >= BITS_PER_WORD
750 && 2 * precision <= op_precision
751 && CONST_INT_P (XEXP (op, 1))
752 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
753 && UINTVAL (XEXP (op, 1)) < op_precision)
755 int byte = subreg_lowpart_offset (mode, op_mode);
756 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
757 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
758 (WORDS_BIG_ENDIAN
759 ? byte - shifted_bytes
760 : byte + shifted_bytes));
763 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
764 and try replacing the TRUNCATE and shift with it. Don't do this
765 if the MEM has a mode-dependent address. */
766 if ((GET_CODE (op) == LSHIFTRT
767 || GET_CODE (op) == ASHIFTRT)
768 && SCALAR_INT_MODE_P (op_mode)
769 && MEM_P (XEXP (op, 0))
770 && CONST_INT_P (XEXP (op, 1))
771 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
772 && INTVAL (XEXP (op, 1)) > 0
773 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
774 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
775 MEM_ADDR_SPACE (XEXP (op, 0)))
776 && ! MEM_VOLATILE_P (XEXP (op, 0))
777 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
778 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
780 int byte = subreg_lowpart_offset (mode, op_mode);
781 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
782 return adjust_address_nv (XEXP (op, 0), mode,
783 (WORDS_BIG_ENDIAN
784 ? byte - shifted_bytes
785 : byte + shifted_bytes));
788 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
789 (OP:SI foo:SI) if OP is NEG or ABS. */
790 if ((GET_CODE (op) == ABS
791 || GET_CODE (op) == NEG)
792 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
793 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
794 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
795 return simplify_gen_unary (GET_CODE (op), mode,
796 XEXP (XEXP (op, 0), 0), mode);
798 /* (truncate:A (subreg:B (truncate:C X) 0)) is
799 (truncate:A X). */
800 if (GET_CODE (op) == SUBREG
801 && SCALAR_INT_MODE_P (mode)
802 && SCALAR_INT_MODE_P (op_mode)
803 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
804 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
805 && subreg_lowpart_p (op))
807 rtx inner = XEXP (SUBREG_REG (op), 0);
808 if (GET_MODE_PRECISION (mode)
809 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
810 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
811 else
812 /* If subreg above is paradoxical and C is narrower
813 than A, return (subreg:A (truncate:C X) 0). */
814 return simplify_gen_subreg (mode, SUBREG_REG (op),
815 GET_MODE (SUBREG_REG (op)), 0);
818 /* (truncate:A (truncate:B X)) is (truncate:A X). */
819 if (GET_CODE (op) == TRUNCATE)
820 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
821 GET_MODE (XEXP (op, 0)));
823 return NULL_RTX;
826 /* Try to simplify a unary operation CODE whose output mode is to be
827 MODE with input operand OP whose mode was originally OP_MODE.
828 Return zero if no simplification can be made. */
829 rtx
830 simplify_unary_operation (enum rtx_code code, machine_mode mode,
831 rtx op, machine_mode op_mode)
833 rtx trueop, tem;
835 trueop = avoid_constant_pool_reference (op);
837 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
838 if (tem)
839 return tem;
841 return simplify_unary_operation_1 (code, mode, op);
844 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
845 to be exact. */
847 static bool
848 exact_int_to_float_conversion_p (const_rtx op)
850 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
851 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
852 /* Constants shouldn't reach here. */
853 gcc_assert (op0_mode != VOIDmode);
854 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
855 int in_bits = in_prec;
856 if (HWI_COMPUTABLE_MODE_P (op0_mode))
858 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
859 if (GET_CODE (op) == FLOAT)
860 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
861 else if (GET_CODE (op) == UNSIGNED_FLOAT)
862 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
863 else
864 gcc_unreachable ();
865 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
867 return in_bits <= out_bits;
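/* Illustrative note added for clarity (not part of the original source),
   assuming IEEE single precision (24-bit significand) for SFmode:

     (float:SF (and:SI (reg:SI 0) (const_int 63)))
        -> exact: nonzero_bits shows at most 6 significant bits.

     (float:SF (reg:SI 0))
        -> not necessarily exact: an arbitrary 32-bit value may need more
           than 24 bits of significand, so rounding could occur.  */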
870 /* Perform some simplifications we can do even if the operands
871 aren't constant. */
872 static rtx
873 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
875 enum rtx_code reversed;
876 rtx temp;
878 switch (code)
880 case NOT:
881 /* (not (not X)) == X. */
882 if (GET_CODE (op) == NOT)
883 return XEXP (op, 0);
885 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
886 comparison is all ones. */
887 if (COMPARISON_P (op)
888 && (mode == BImode || STORE_FLAG_VALUE == -1)
889 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
890 return simplify_gen_relational (reversed, mode, VOIDmode,
891 XEXP (op, 0), XEXP (op, 1));
893 /* (not (plus X -1)) can become (neg X). */
894 if (GET_CODE (op) == PLUS
895 && XEXP (op, 1) == constm1_rtx)
896 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
898 /* Similarly, (not (neg X)) is (plus X -1). */
899 if (GET_CODE (op) == NEG)
900 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
901 CONSTM1_RTX (mode));
903 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
904 if (GET_CODE (op) == XOR
905 && CONST_INT_P (XEXP (op, 1))
906 && (temp = simplify_unary_operation (NOT, mode,
907 XEXP (op, 1), mode)) != 0)
908 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
910 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
911 if (GET_CODE (op) == PLUS
912 && CONST_INT_P (XEXP (op, 1))
913 && mode_signbit_p (mode, XEXP (op, 1))
914 && (temp = simplify_unary_operation (NOT, mode,
915 XEXP (op, 1), mode)) != 0)
916 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
919 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
920 operands other than 1, but that is not valid. We could do a
921 similar simplification for (not (lshiftrt C X)) where C is
922 just the sign bit, but this doesn't seem common enough to
923 bother with. */
924 if (GET_CODE (op) == ASHIFT
925 && XEXP (op, 0) == const1_rtx)
927 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
928 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
931 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
932 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
933 so we can perform the above simplification. */
934 if (STORE_FLAG_VALUE == -1
935 && GET_CODE (op) == ASHIFTRT
936 && CONST_INT_P (XEXP (op, 1))
937 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
938 return simplify_gen_relational (GE, mode, VOIDmode,
939 XEXP (op, 0), const0_rtx);
942 if (GET_CODE (op) == SUBREG
943 && subreg_lowpart_p (op)
944 && (GET_MODE_SIZE (GET_MODE (op))
945 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
946 && GET_CODE (SUBREG_REG (op)) == ASHIFT
947 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
949 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
950 rtx x;
952 x = gen_rtx_ROTATE (inner_mode,
953 simplify_gen_unary (NOT, inner_mode, const1_rtx,
954 inner_mode),
955 XEXP (SUBREG_REG (op), 1));
956 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
957 if (temp)
958 return temp;
961 /* Apply De Morgan's laws to reduce number of patterns for machines
962 with negating logical insns (and-not, nand, etc.). If result has
963 only one NOT, put it first, since that is how the patterns are
964 coded. */
965 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
967 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
968 machine_mode op_mode;
970 op_mode = GET_MODE (in1);
971 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
973 op_mode = GET_MODE (in2);
974 if (op_mode == VOIDmode)
975 op_mode = mode;
976 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
978 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
979 std::swap (in1, in2);
981 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
982 mode, in1, in2);
985 /* (not (bswap x)) -> (bswap (not x)). */
986 if (GET_CODE (op) == BSWAP)
988 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
989 return simplify_gen_unary (BSWAP, mode, x, mode);
991 break;
993 case NEG:
994 /* (neg (neg X)) == X. */
995 if (GET_CODE (op) == NEG)
996 return XEXP (op, 0);
998 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
999 If comparison is not reversible use
1000 x ? y : (neg y). */
1001 if (GET_CODE (op) == IF_THEN_ELSE)
1003 rtx cond = XEXP (op, 0);
1004 rtx true_rtx = XEXP (op, 1);
1005 rtx false_rtx = XEXP (op, 2);
1007 if ((GET_CODE (true_rtx) == NEG
1008 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1009 || (GET_CODE (false_rtx) == NEG
1010 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1012 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1013 temp = reversed_comparison (cond, mode);
1014 else
1016 temp = cond;
1017 std::swap (true_rtx, false_rtx);
1019 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1020 mode, temp, true_rtx, false_rtx);
1024 /* (neg (plus X 1)) can become (not X). */
1025 if (GET_CODE (op) == PLUS
1026 && XEXP (op, 1) == const1_rtx)
1027 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1029 /* Similarly, (neg (not X)) is (plus X 1). */
1030 if (GET_CODE (op) == NOT)
1031 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1032 CONST1_RTX (mode));
1034 /* (neg (minus X Y)) can become (minus Y X). This transformation
1035 isn't safe for modes with signed zeros, since if X and Y are
1036 both +0, (minus Y X) is the same as (minus X Y). If the
1037 rounding mode is towards +infinity (or -infinity) then the two
1038 expressions will be rounded differently. */
1039 if (GET_CODE (op) == MINUS
1040 && !HONOR_SIGNED_ZEROS (mode)
1041 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1042 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1044 if (GET_CODE (op) == PLUS
1045 && !HONOR_SIGNED_ZEROS (mode)
1046 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1048 /* (neg (plus A C)) is simplified to (minus -C A). */
1049 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1050 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1052 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1053 if (temp)
1054 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1057 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1058 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1059 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1062 /* (neg (mult A B)) becomes (mult A (neg B)).
1063 This works even for floating-point values. */
1064 if (GET_CODE (op) == MULT
1065 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1067 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1068 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1071 /* NEG commutes with ASHIFT since it is multiplication. Only do
1072 this if we can then eliminate the NEG (e.g., if the operand
1073 is a constant). */
1074 if (GET_CODE (op) == ASHIFT)
1076 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1077 if (temp)
1078 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1081 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1082 C is equal to the width of MODE minus 1. */
1083 if (GET_CODE (op) == ASHIFTRT
1084 && CONST_INT_P (XEXP (op, 1))
1085 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1086 return simplify_gen_binary (LSHIFTRT, mode,
1087 XEXP (op, 0), XEXP (op, 1));
1089 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1090 C is equal to the width of MODE minus 1. */
1091 if (GET_CODE (op) == LSHIFTRT
1092 && CONST_INT_P (XEXP (op, 1))
1093 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1094 return simplify_gen_binary (ASHIFTRT, mode,
1095 XEXP (op, 0), XEXP (op, 1));
1097 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1098 if (GET_CODE (op) == XOR
1099 && XEXP (op, 1) == const1_rtx
1100 && nonzero_bits (XEXP (op, 0), mode) == 1)
1101 return plus_constant (mode, XEXP (op, 0), -1);
1103 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1104 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1105 if (GET_CODE (op) == LT
1106 && XEXP (op, 1) == const0_rtx
1107 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1109 machine_mode inner = GET_MODE (XEXP (op, 0));
1110 int isize = GET_MODE_PRECISION (inner);
1111 if (STORE_FLAG_VALUE == 1)
1113 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1114 GEN_INT (isize - 1));
1115 if (mode == inner)
1116 return temp;
1117 if (GET_MODE_PRECISION (mode) > isize)
1118 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1119 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1121 else if (STORE_FLAG_VALUE == -1)
1123 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1124 GEN_INT (isize - 1));
1125 if (mode == inner)
1126 return temp;
1127 if (GET_MODE_PRECISION (mode) > isize)
1128 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1129 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1132 break;
1134 case TRUNCATE:
1135 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1136 with the umulXi3_highpart patterns. */
1137 if (GET_CODE (op) == LSHIFTRT
1138 && GET_CODE (XEXP (op, 0)) == MULT)
1139 break;
1141 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1143 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1145 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1146 if (temp)
1147 return temp;
1149 /* We can't handle truncation to a partial integer mode here
1150 because we don't know the real bitsize of the partial
1151 integer mode. */
1152 break;
1155 if (GET_MODE (op) != VOIDmode)
1157 temp = simplify_truncation (mode, op, GET_MODE (op));
1158 if (temp)
1159 return temp;
1162 /* If we know that the value is already truncated, we can
1163 replace the TRUNCATE with a SUBREG. */
1164 if (GET_MODE_NUNITS (mode) == 1
1165 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1166 || truncated_to_mode (mode, op)))
1168 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1169 if (temp)
1170 return temp;
1173 /* A truncate of a comparison can be replaced with a subreg if
1174 STORE_FLAG_VALUE permits. This is like the previous test,
1175 but it works even if the comparison is done in a mode larger
1176 than HOST_BITS_PER_WIDE_INT. */
1177 if (HWI_COMPUTABLE_MODE_P (mode)
1178 && COMPARISON_P (op)
1179 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1181 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1182 if (temp)
1183 return temp;
1186 /* A truncate of a memory is just loading the low part of the memory
1187 if we are not changing the meaning of the address. */
1188 if (GET_CODE (op) == MEM
1189 && !VECTOR_MODE_P (mode)
1190 && !MEM_VOLATILE_P (op)
1191 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1193 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1194 if (temp)
1195 return temp;
1198 break;
1200 case FLOAT_TRUNCATE:
1201 if (DECIMAL_FLOAT_MODE_P (mode))
1202 break;
1204 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1205 if (GET_CODE (op) == FLOAT_EXTEND
1206 && GET_MODE (XEXP (op, 0)) == mode)
1207 return XEXP (op, 0);
1209 /* (float_truncate:SF (float_truncate:DF foo:XF))
1210 = (float_truncate:SF foo:XF).
1211 This may eliminate double rounding, so it is unsafe.
1213 (float_truncate:SF (float_extend:XF foo:DF))
1214 = (float_truncate:SF foo:DF).
1216 (float_truncate:DF (float_extend:XF foo:SF))
1217 = (float_extend:DF foo:SF). */
1218 if ((GET_CODE (op) == FLOAT_TRUNCATE
1219 && flag_unsafe_math_optimizations)
1220 || GET_CODE (op) == FLOAT_EXTEND)
1221 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1222 0)))
1223 > GET_MODE_SIZE (mode)
1224 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1225 mode,
1226 XEXP (op, 0), mode);
1228 /* (float_truncate (float x)) is (float x) */
1229 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1230 && (flag_unsafe_math_optimizations
1231 || exact_int_to_float_conversion_p (op)))
1232 return simplify_gen_unary (GET_CODE (op), mode,
1233 XEXP (op, 0),
1234 GET_MODE (XEXP (op, 0)));
1236 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1237 (OP:SF foo:SF) if OP is NEG or ABS. */
1238 if ((GET_CODE (op) == ABS
1239 || GET_CODE (op) == NEG)
1240 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1241 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1242 return simplify_gen_unary (GET_CODE (op), mode,
1243 XEXP (XEXP (op, 0), 0), mode);
1245 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1246 is (float_truncate:SF x). */
1247 if (GET_CODE (op) == SUBREG
1248 && subreg_lowpart_p (op)
1249 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1250 return SUBREG_REG (op);
1251 break;
1253 case FLOAT_EXTEND:
1254 if (DECIMAL_FLOAT_MODE_P (mode))
1255 break;
1257 /* (float_extend (float_extend x)) is (float_extend x)
1259 (float_extend (float x)) is (float x) assuming that double
1260 rounding can't happen. */
1262 if (GET_CODE (op) == FLOAT_EXTEND
1263 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1264 && exact_int_to_float_conversion_p (op)))
1265 return simplify_gen_unary (GET_CODE (op), mode,
1266 XEXP (op, 0),
1267 GET_MODE (XEXP (op, 0)));
1269 break;
1271 case ABS:
1272 /* (abs (neg <foo>)) -> (abs <foo>) */
1273 if (GET_CODE (op) == NEG)
1274 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1275 GET_MODE (XEXP (op, 0)));
1277 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1278 do nothing. */
1279 if (GET_MODE (op) == VOIDmode)
1280 break;
1282 /* If operand is something known to be positive, ignore the ABS. */
1283 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1284 || val_signbit_known_clear_p (GET_MODE (op),
1285 nonzero_bits (op, GET_MODE (op))))
1286 return op;
1288 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1289 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1290 return gen_rtx_NEG (mode, op);
1292 break;
1294 case FFS:
1295 /* (ffs (*_extend <X>)) = (ffs <X>) */
1296 if (GET_CODE (op) == SIGN_EXTEND
1297 || GET_CODE (op) == ZERO_EXTEND)
1298 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1299 GET_MODE (XEXP (op, 0)));
1300 break;
1302 case POPCOUNT:
1303 switch (GET_CODE (op))
1305 case BSWAP:
1306 case ZERO_EXTEND:
1307 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1308 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1309 GET_MODE (XEXP (op, 0)));
1311 case ROTATE:
1312 case ROTATERT:
1313 /* Rotations don't affect popcount. */
1314 if (!side_effects_p (XEXP (op, 1)))
1315 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1316 GET_MODE (XEXP (op, 0)));
1317 break;
1319 default:
1320 break;
1322 break;
1324 case PARITY:
1325 switch (GET_CODE (op))
1327 case NOT:
1328 case BSWAP:
1329 case ZERO_EXTEND:
1330 case SIGN_EXTEND:
1331 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1332 GET_MODE (XEXP (op, 0)));
1334 case ROTATE:
1335 case ROTATERT:
1336 /* Rotations don't affect parity. */
1337 if (!side_effects_p (XEXP (op, 1)))
1338 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1339 GET_MODE (XEXP (op, 0)));
1340 break;
1342 default:
1343 break;
1345 break;
1347 case BSWAP:
1348 /* (bswap (bswap x)) -> x. */
1349 if (GET_CODE (op) == BSWAP)
1350 return XEXP (op, 0);
1351 break;
1353 case FLOAT:
1354 /* (float (sign_extend <X>)) = (float <X>). */
1355 if (GET_CODE (op) == SIGN_EXTEND)
1356 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1357 GET_MODE (XEXP (op, 0)));
1358 break;
1360 case SIGN_EXTEND:
1361 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1362 becomes just the MINUS if its mode is MODE. This allows
1363 folding switch statements on machines using casesi (such as
1364 the VAX). */
1365 if (GET_CODE (op) == TRUNCATE
1366 && GET_MODE (XEXP (op, 0)) == mode
1367 && GET_CODE (XEXP (op, 0)) == MINUS
1368 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1369 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1370 return XEXP (op, 0);
1372 /* Extending a widening multiplication should be canonicalized to
1373 a wider widening multiplication. */
1374 if (GET_CODE (op) == MULT)
1376 rtx lhs = XEXP (op, 0);
1377 rtx rhs = XEXP (op, 1);
1378 enum rtx_code lcode = GET_CODE (lhs);
1379 enum rtx_code rcode = GET_CODE (rhs);
1381 /* Widening multiplies usually extend both operands, but sometimes
1382 they use a shift to extract a portion of a register. */
1383 if ((lcode == SIGN_EXTEND
1384 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1385 && (rcode == SIGN_EXTEND
1386 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1388 machine_mode lmode = GET_MODE (lhs);
1389 machine_mode rmode = GET_MODE (rhs);
1390 int bits;
1392 if (lcode == ASHIFTRT)
1393 /* Number of bits not shifted off the end. */
1394 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1395 else /* lcode == SIGN_EXTEND */
1396 /* Size of inner mode. */
1397 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1399 if (rcode == ASHIFTRT)
1400 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1401 else /* rcode == SIGN_EXTEND */
1402 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1404 /* We can only widen multiplies if the result is mathematically
1405 equivalent, i.e. if overflow was impossible. */
1406 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1407 return simplify_gen_binary
1408 (MULT, mode,
1409 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1410 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
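/* Illustrative note added for clarity (not part of the original source):
   for

     (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI 0))
                              (sign_extend:SI (reg:HI 1))))

   each operand contributes 16 significant bits, so BITS = 16 + 16 = 32,
   which does not exceed the 32-bit precision of the inner SImode MULT;
   the product cannot overflow and the expression is canonicalized to the
   wider

     (mult:DI (sign_extend:DI (reg:HI 0)) (sign_extend:DI (reg:HI 1))).  */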
1414 /* Check for a sign extension of a subreg of a promoted
1415 variable, where the promotion is sign-extended, and the
1416 target mode is the same as the variable's promotion. */
1417 if (GET_CODE (op) == SUBREG
1418 && SUBREG_PROMOTED_VAR_P (op)
1419 && SUBREG_PROMOTED_SIGNED_P (op)
1420 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1422 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1423 if (temp)
1424 return temp;
1427 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1428 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1429 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1431 gcc_assert (GET_MODE_PRECISION (mode)
1432 > GET_MODE_PRECISION (GET_MODE (op)));
1433 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1434 GET_MODE (XEXP (op, 0)));
1437 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1438 is (sign_extend:M (subreg:O <X>)) if there is mode with
1439 GET_MODE_BITSIZE (N) - I bits.
1440 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1441 is similarly (zero_extend:M (subreg:O <X>)). */
1442 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1443 && GET_CODE (XEXP (op, 0)) == ASHIFT
1444 && CONST_INT_P (XEXP (op, 1))
1445 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1446 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1448 machine_mode tmode
1449 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1450 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1451 gcc_assert (GET_MODE_BITSIZE (mode)
1452 > GET_MODE_BITSIZE (GET_MODE (op)));
1453 if (tmode != BLKmode)
1455 rtx inner =
1456 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1457 if (inner)
1458 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1459 ? SIGN_EXTEND : ZERO_EXTEND,
1460 mode, inner, tmode);
1464 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1465 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1466 if (GET_CODE (op) == LSHIFTRT
1467 && CONST_INT_P (XEXP (op, 1))
1468 && XEXP (op, 1) != const0_rtx)
1469 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1471 #if defined(POINTERS_EXTEND_UNSIGNED)
1472 /* As we do not know which address space the pointer is referring to,
1473 we can do this only if the target does not support different pointer
1474 or address modes depending on the address space. */
1475 if (target_default_pointer_address_modes_p ()
1476 && ! POINTERS_EXTEND_UNSIGNED
1477 && mode == Pmode && GET_MODE (op) == ptr_mode
1478 && (CONSTANT_P (op)
1479 || (GET_CODE (op) == SUBREG
1480 && REG_P (SUBREG_REG (op))
1481 && REG_POINTER (SUBREG_REG (op))
1482 && GET_MODE (SUBREG_REG (op)) == Pmode))
1483 && !targetm.have_ptr_extend ())
1485 temp
1486 = convert_memory_address_addr_space_1 (Pmode, op,
1487 ADDR_SPACE_GENERIC, false,
1488 true);
1489 if (temp)
1490 return temp;
1492 #endif
1493 break;
1495 case ZERO_EXTEND:
1496 /* Check for a zero extension of a subreg of a promoted
1497 variable, where the promotion is zero-extended, and the
1498 target mode is the same as the variable's promotion. */
1499 if (GET_CODE (op) == SUBREG
1500 && SUBREG_PROMOTED_VAR_P (op)
1501 && SUBREG_PROMOTED_UNSIGNED_P (op)
1502 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1504 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1505 if (temp)
1506 return temp;
1509 /* Extending a widening multiplication should be canonicalized to
1510 a wider widening multiplication. */
1511 if (GET_CODE (op) == MULT)
1513 rtx lhs = XEXP (op, 0);
1514 rtx rhs = XEXP (op, 1);
1515 enum rtx_code lcode = GET_CODE (lhs);
1516 enum rtx_code rcode = GET_CODE (rhs);
1518 /* Widening multiplies usually extend both operands, but sometimes
1519 they use a shift to extract a portion of a register. */
1520 if ((lcode == ZERO_EXTEND
1521 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1522 && (rcode == ZERO_EXTEND
1523 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1525 machine_mode lmode = GET_MODE (lhs);
1526 machine_mode rmode = GET_MODE (rhs);
1527 int bits;
1529 if (lcode == LSHIFTRT)
1530 /* Number of bits not shifted off the end. */
1531 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1532 else /* lcode == ZERO_EXTEND */
1533 /* Size of inner mode. */
1534 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1536 if (rcode == LSHIFTRT)
1537 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1538 else /* rcode == ZERO_EXTEND */
1539 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1541 /* We can only widen multiplies if the result is mathematically
1542 equivalent, i.e. if overflow was impossible. */
1543 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1544 return simplify_gen_binary
1545 (MULT, mode,
1546 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1547 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1551 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1552 if (GET_CODE (op) == ZERO_EXTEND)
1553 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1554 GET_MODE (XEXP (op, 0)));
1556 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1557 is (zero_extend:M (subreg:O <X>)) if there is mode with
1558 GET_MODE_PRECISION (N) - I bits. */
1559 if (GET_CODE (op) == LSHIFTRT
1560 && GET_CODE (XEXP (op, 0)) == ASHIFT
1561 && CONST_INT_P (XEXP (op, 1))
1562 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1563 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1565 machine_mode tmode
1566 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1567 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1568 if (tmode != BLKmode)
1570 rtx inner =
1571 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1572 if (inner)
1573 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1577 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1578 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1579 of mode N. E.g.
1580 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1581 (and:SI (reg:SI) (const_int 63)). */
1582 if (GET_CODE (op) == SUBREG
1583 && GET_MODE_PRECISION (GET_MODE (op))
1584 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1585 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1586 <= HOST_BITS_PER_WIDE_INT
1587 && GET_MODE_PRECISION (mode)
1588 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1589 && subreg_lowpart_p (op)
1590 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1591 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1593 if (GET_MODE_PRECISION (mode)
1594 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1595 return SUBREG_REG (op);
1596 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1597 GET_MODE (SUBREG_REG (op)));
1600 #if defined(POINTERS_EXTEND_UNSIGNED)
1601 /* As we do not know which address space the pointer is referring to,
1602 we can do this only if the target does not support different pointer
1603 or address modes depending on the address space. */
1604 if (target_default_pointer_address_modes_p ()
1605 && POINTERS_EXTEND_UNSIGNED > 0
1606 && mode == Pmode && GET_MODE (op) == ptr_mode
1607 && (CONSTANT_P (op)
1608 || (GET_CODE (op) == SUBREG
1609 && REG_P (SUBREG_REG (op))
1610 && REG_POINTER (SUBREG_REG (op))
1611 && GET_MODE (SUBREG_REG (op)) == Pmode))
1612 && !targetm.have_ptr_extend ())
1614 temp
1615 = convert_memory_address_addr_space_1 (Pmode, op,
1616 ADDR_SPACE_GENERIC, false,
1617 true);
1618 if (temp)
1619 return temp;
1621 #endif
1622 break;
1624 default:
1625 break;
1628 return 0;
1631 /* Try to compute the value of a unary operation CODE whose output mode is to
1632 be MODE with input operand OP whose mode was originally OP_MODE.
1633 Return zero if the value cannot be computed. */
1634 rtx
1635 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1636 rtx op, machine_mode op_mode)
1638 unsigned int width = GET_MODE_PRECISION (mode);
1640 if (code == VEC_DUPLICATE)
1642 gcc_assert (VECTOR_MODE_P (mode));
1643 if (GET_MODE (op) != VOIDmode)
1645 if (!VECTOR_MODE_P (GET_MODE (op)))
1646 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1647 else
1648 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1649 (GET_MODE (op)));
1651 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1652 || GET_CODE (op) == CONST_VECTOR)
1654 int elt_size = GET_MODE_UNIT_SIZE (mode);
1655 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1656 rtvec v = rtvec_alloc (n_elts);
1657 unsigned int i;
1659 if (GET_CODE (op) != CONST_VECTOR)
1660 for (i = 0; i < n_elts; i++)
1661 RTVEC_ELT (v, i) = op;
1662 else
1664 machine_mode inmode = GET_MODE (op);
1665 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1666 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1668 gcc_assert (in_n_elts < n_elts);
1669 gcc_assert ((n_elts % in_n_elts) == 0);
1670 for (i = 0; i < n_elts; i++)
1671 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1673 return gen_rtx_CONST_VECTOR (mode, v);
1677 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1679 int elt_size = GET_MODE_UNIT_SIZE (mode);
1680 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1681 machine_mode opmode = GET_MODE (op);
1682 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1683 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1684 rtvec v = rtvec_alloc (n_elts);
1685 unsigned int i;
1687 gcc_assert (op_n_elts == n_elts);
1688 for (i = 0; i < n_elts; i++)
1690 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1691 CONST_VECTOR_ELT (op, i),
1692 GET_MODE_INNER (opmode));
1693 if (!x)
1694 return 0;
1695 RTVEC_ELT (v, i) = x;
1697 return gen_rtx_CONST_VECTOR (mode, v);
1700 /* The order of these tests is critical so that, for example, we don't
1701 check the wrong mode (input vs. output) for a conversion operation,
1702 such as FIX. At some point, this should be simplified. */
1704 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1706 REAL_VALUE_TYPE d;
1708 if (op_mode == VOIDmode)
1710 /* CONST_INTs have VOIDmode as their mode. We assume that all
1711 the bits of the constant are significant, though this is
1712 a dangerous assumption: CONST_INTs are often created and
1713 used with garbage in the bits outside of the precision of
1714 the implied mode of the const_int. */
1715 op_mode = MAX_MODE_INT;
1718 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1720 /* Avoid the folding if flag_signaling_nans is on and
1721 operand is a signaling NaN. */
1722 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1723 return 0;
1725 d = real_value_truncate (mode, d);
1726 return const_double_from_real_value (d, mode);
1728 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1730 REAL_VALUE_TYPE d;
1732 if (op_mode == VOIDmode)
1734 /* CONST_INTs have VOIDmode as their mode. We assume that all
1735 the bits of the constant are significant, though this is
1736 a dangerous assumption: CONST_INTs are often created and
1737 used with garbage in the bits outside of the precision of
1738 the implied mode of the const_int. */
1739 op_mode = MAX_MODE_INT;
1742 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1744 /* Avoid the folding if flag_signaling_nans is on and
1745 operand is a signaling NaN. */
1746 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1747 return 0;
1749 d = real_value_truncate (mode, d);
1750 return const_double_from_real_value (d, mode);
1753 if (CONST_SCALAR_INT_P (op) && width > 0)
1755 wide_int result;
1756 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1757 rtx_mode_t op0 = std::make_pair (op, imode);
1758 int int_value;
1760 #if TARGET_SUPPORTS_WIDE_INT == 0
1761 /* This assert keeps the simplification from producing a result
1762 that cannot be represented in a CONST_DOUBLE, but a lot of
1763 upstream callers expect that this function never fails to
1764 simplify something, so if you added this check to the test
1765 above, the code would die later anyway. If this assert
1766 fires, you just need to make the port support wide int. */
1767 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1768 #endif
1770 switch (code)
1772 case NOT:
1773 result = wi::bit_not (op0);
1774 break;
1776 case NEG:
1777 result = wi::neg (op0);
1778 break;
1780 case ABS:
1781 result = wi::abs (op0);
1782 break;
1784 case FFS:
1785 result = wi::shwi (wi::ffs (op0), mode);
1786 break;
1788 case CLZ:
1789 if (wi::ne_p (op0, 0))
1790 int_value = wi::clz (op0);
1791 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1792 int_value = GET_MODE_PRECISION (mode);
1793 result = wi::shwi (int_value, mode);
1794 break;
1796 case CLRSB:
1797 result = wi::shwi (wi::clrsb (op0), mode);
1798 break;
1800 case CTZ:
1801 if (wi::ne_p (op0, 0))
1802 int_value = wi::ctz (op0);
1803 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1804 int_value = GET_MODE_PRECISION (mode);
1805 result = wi::shwi (int_value, mode);
1806 break;
1808 case POPCOUNT:
1809 result = wi::shwi (wi::popcount (op0), mode);
1810 break;
1812 case PARITY:
1813 result = wi::shwi (wi::parity (op0), mode);
1814 break;
1816 case BSWAP:
1817 result = wide_int (op0).bswap ();
1818 break;
1820 case TRUNCATE:
1821 case ZERO_EXTEND:
1822 result = wide_int::from (op0, width, UNSIGNED);
1823 break;
1825 case SIGN_EXTEND:
1826 result = wide_int::from (op0, width, SIGNED);
1827 break;
1829 case SQRT:
1830 default:
1831 return 0;
1834 return immed_wide_int_const (result, mode);
1837 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1838 && SCALAR_FLOAT_MODE_P (mode)
1839 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1841 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1842 switch (code)
1844 case SQRT:
1845 return 0;
1846 case ABS:
1847 d = real_value_abs (&d);
1848 break;
1849 case NEG:
1850 d = real_value_negate (&d);
1851 break;
1852 case FLOAT_TRUNCATE:
1853 /* Don't perform the operation if flag_signaling_nans is on
1854 and the operand is a signaling NaN. */
1855 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1856 d = real_value_truncate (mode, d);
1857 break;
1858 case FLOAT_EXTEND:
1859 /* All this does is change the mode, unless the mode
1860 class changes. */
1861 /* Don't perform the operation if flag_signaling_nans is on
1862 and the operand is a signaling NaN. */
1863 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1864 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1865 real_convert (&d, mode, &d);
1866 break;
1867 case FIX:
1868 /* Don't perform the operation if flag_signaling_nans is on
1869 and the operand is a signaling NaN. */
1870 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1871 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1872 break;
1873 case NOT:
1875 long tmp[4];
1876 int i;
1878 real_to_target (tmp, &d, GET_MODE (op));
1879 for (i = 0; i < 4; i++)
1880 tmp[i] = ~tmp[i];
1881 real_from_target (&d, tmp, mode);
1882 break;
1884 default:
1885 gcc_unreachable ();
1887 return const_double_from_real_value (d, mode);
1889 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1890 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1891 && GET_MODE_CLASS (mode) == MODE_INT
1892 && width > 0)
1894 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1895 operators are intentionally left unspecified (to ease implementation
1896 by target backends), for consistency, this routine implements the
1897 same semantics for constant folding as used by the middle-end. */
1899 /* This was formerly used only for non-IEEE float.
1900 eggert@twinsun.com says it is safe for IEEE also. */
1901 REAL_VALUE_TYPE t;
1902 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1903 wide_int wmax, wmin;
1904 /* This is part of the ABI of real_to_integer, but we check
1905 things before making this call. */
1906 bool fail;
1908 switch (code)
1910 case FIX:
1911 if (REAL_VALUE_ISNAN (*x))
1912 return const0_rtx;
1914 /* Test against the signed upper bound. */
1915 wmax = wi::max_value (width, SIGNED);
1916 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1917 if (real_less (&t, x))
1918 return immed_wide_int_const (wmax, mode);
1920 /* Test against the signed lower bound. */
1921 wmin = wi::min_value (width, SIGNED);
1922 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1923 if (real_less (x, &t))
1924 return immed_wide_int_const (wmin, mode);
1926 return immed_wide_int_const (real_to_integer (x, &fail, width),
1927 mode);
1929 case UNSIGNED_FIX:
1930 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1931 return const0_rtx;
1933 /* Test against the unsigned upper bound. */
1934 wmax = wi::max_value (width, UNSIGNED);
1935 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1936 if (real_less (&t, x))
1937 return immed_wide_int_const (wmax, mode);
1939 return immed_wide_int_const (real_to_integer (x, &fail, width),
1940 mode);
1942 default:
1943 gcc_unreachable ();
1947 return NULL_RTX;
1950 /* Subroutine of simplify_binary_operation to simplify a binary operation
1951 CODE that can commute with byte swapping, with result mode MODE and
1952 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1953 Return zero if no simplification or canonicalization is possible. */
1955 static rtx
1956 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1957 rtx op0, rtx op1)
1959 rtx tem;
1961 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
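/* As a concrete illustration (assuming SImode): (and (bswap:SI x) (const_int 0xff))
   can be rewritten as (bswap:SI (and:SI x (const_int 0xff000000))), because
   byte-swapping the mask moves it onto the bytes that end up in the low positions. */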
1962 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1964 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1965 simplify_gen_unary (BSWAP, mode, op1, mode));
1966 return simplify_gen_unary (BSWAP, mode, tem, mode);
1969 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1970 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1972 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1973 return simplify_gen_unary (BSWAP, mode, tem, mode);
1976 return NULL_RTX;
1979 /* Subroutine of simplify_binary_operation to simplify a commutative,
1980 associative binary operation CODE with result mode MODE, operating
1981 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1982 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1983 canonicalization is possible. */
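/* For instance, with op0 = (plus r (const_int 4)) and op1 = (const_int 8),
   reassociation lets the two constants fold, yielding (plus r (const_int 12)).
   (Here r stands for an arbitrary non-constant operand.) */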
1985 static rtx
1986 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1987 rtx op0, rtx op1)
1989 rtx tem;
1991 /* Linearize the operator to the left. */
1992 if (GET_CODE (op1) == code)
1994 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1995 if (GET_CODE (op0) == code)
1997 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1998 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2001 /* "a op (b op c)" becomes "(b op c) op a". */
2002 if (! swap_commutative_operands_p (op1, op0))
2003 return simplify_gen_binary (code, mode, op1, op0);
2005 std::swap (op0, op1);
2008 if (GET_CODE (op0) == code)
2010 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2011 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2013 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2014 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2017 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2018 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2019 if (tem != 0)
2020 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2022 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2023 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2024 if (tem != 0)
2025 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2028 return 0;
2032 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2033 and OP1. Return 0 if no simplification is possible.
2035 Don't use this for relational operations such as EQ or LT.
2036 Use simplify_relational_operation instead. */
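/* A minimal, hypothetical usage sketch (the register number is illustrative only):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx res = simplify_binary_operation (PLUS, SImode, reg, const0_rtx);

   Here res is simply reg, since x + 0 folds for integral modes; a NULL_RTX
   result would mean that no simplification was found. */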
2037 rtx
2038 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2039 rtx op0, rtx op1)
2041 rtx trueop0, trueop1;
2042 rtx tem;
2044 /* Relational operations don't work here. We must know the mode
2045 of the operands in order to do the comparison correctly.
2046 Assuming a full word can give incorrect results.
2047 Consider comparing 128 with -128 in QImode. */
2048 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2049 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2051 /* Make sure the constant is second. */
2052 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2053 && swap_commutative_operands_p (op0, op1))
2054 std::swap (op0, op1);
2056 trueop0 = avoid_constant_pool_reference (op0);
2057 trueop1 = avoid_constant_pool_reference (op1);
2059 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2060 if (tem)
2061 return tem;
2062 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2064 if (tem)
2065 return tem;
2067 /* If the above steps did not result in a simplification and op0 or op1
2068 were constant pool references, use the referenced constants directly. */
2069 if (trueop0 != op0 || trueop1 != op1)
2070 return simplify_gen_binary (code, mode, trueop0, trueop1);
2072 return NULL_RTX;
2075 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2076 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2077 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2078 actual constants. */
2080 static rtx
2081 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2082 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2084 rtx tem, reversed, opleft, opright;
2085 HOST_WIDE_INT val;
2086 unsigned int width = GET_MODE_PRECISION (mode);
2088 /* Even if we can't compute a constant result,
2089 there are some cases worth simplifying. */
2091 switch (code)
2093 case PLUS:
2094 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2095 when x is NaN, infinite, or finite and nonzero. They aren't
2096 when x is -0 and the rounding mode is not towards -infinity,
2097 since (-0) + 0 is then 0. */
2098 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2099 return op0;
2101 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2102 transformations are safe even for IEEE. */
2103 if (GET_CODE (op0) == NEG)
2104 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2105 else if (GET_CODE (op1) == NEG)
2106 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2108 /* (~a) + 1 -> -a */
2109 if (INTEGRAL_MODE_P (mode)
2110 && GET_CODE (op0) == NOT
2111 && trueop1 == const1_rtx)
2112 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2114 /* Handle both-operands-constant cases. We can only add
2115 CONST_INTs to constants since the sum of relocatable symbols
2116 can't be handled by most assemblers. Don't add CONST_INT
2117 to CONST_INT since overflow won't be computed properly if wider
2118 than HOST_BITS_PER_WIDE_INT. */
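/* For example, (plus (symbol_ref "foo") (const_int 4)) is typically folded by
   plus_constant into (const (plus (symbol_ref "foo") (const_int 4))), which keeps
   the relocation in a form the assembler can handle. */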
2120 if ((GET_CODE (op0) == CONST
2121 || GET_CODE (op0) == SYMBOL_REF
2122 || GET_CODE (op0) == LABEL_REF)
2123 && CONST_INT_P (op1))
2124 return plus_constant (mode, op0, INTVAL (op1));
2125 else if ((GET_CODE (op1) == CONST
2126 || GET_CODE (op1) == SYMBOL_REF
2127 || GET_CODE (op1) == LABEL_REF)
2128 && CONST_INT_P (op0))
2129 return plus_constant (mode, op1, INTVAL (op0));
2131 /* See if this is something like X * C - X or vice versa or
2132 if the multiplication is written as a shift. If so, we can
2133 distribute and make a new multiply, shift, or maybe just
2134 have X (if C is 2 in the example above). But don't make
2135 something more expensive than we had before. */
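/* For instance, (plus (mult r (const_int 3)) r) can become (mult r (const_int 4)),
   provided the rtx-cost comparison below does not find the new form more expensive. */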
2137 if (SCALAR_INT_MODE_P (mode))
2139 rtx lhs = op0, rhs = op1;
2141 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2142 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2144 if (GET_CODE (lhs) == NEG)
2146 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2147 lhs = XEXP (lhs, 0);
2149 else if (GET_CODE (lhs) == MULT
2150 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2152 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2153 lhs = XEXP (lhs, 0);
2155 else if (GET_CODE (lhs) == ASHIFT
2156 && CONST_INT_P (XEXP (lhs, 1))
2157 && INTVAL (XEXP (lhs, 1)) >= 0
2158 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2160 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2161 GET_MODE_PRECISION (mode));
2162 lhs = XEXP (lhs, 0);
2165 if (GET_CODE (rhs) == NEG)
2167 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2168 rhs = XEXP (rhs, 0);
2170 else if (GET_CODE (rhs) == MULT
2171 && CONST_INT_P (XEXP (rhs, 1)))
2173 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2174 rhs = XEXP (rhs, 0);
2176 else if (GET_CODE (rhs) == ASHIFT
2177 && CONST_INT_P (XEXP (rhs, 1))
2178 && INTVAL (XEXP (rhs, 1)) >= 0
2179 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2181 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2182 GET_MODE_PRECISION (mode));
2183 rhs = XEXP (rhs, 0);
2186 if (rtx_equal_p (lhs, rhs))
2188 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2189 rtx coeff;
2190 bool speed = optimize_function_for_speed_p (cfun);
2192 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2194 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2195 return (set_src_cost (tem, mode, speed)
2196 <= set_src_cost (orig, mode, speed) ? tem : 0);
2200 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2201 if (CONST_SCALAR_INT_P (op1)
2202 && GET_CODE (op0) == XOR
2203 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2204 && mode_signbit_p (mode, op1))
2205 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2206 simplify_gen_binary (XOR, mode, op1,
2207 XEXP (op0, 1)));
2209 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2210 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2211 && GET_CODE (op0) == MULT
2212 && GET_CODE (XEXP (op0, 0)) == NEG)
2214 rtx in1, in2;
2216 in1 = XEXP (XEXP (op0, 0), 0);
2217 in2 = XEXP (op0, 1);
2218 return simplify_gen_binary (MINUS, mode, op1,
2219 simplify_gen_binary (MULT, mode,
2220 in1, in2));
2223 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2224 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2225 is 1. */
2226 if (COMPARISON_P (op0)
2227 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2228 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2229 && (reversed = reversed_comparison (op0, mode)))
2230 return
2231 simplify_gen_unary (NEG, mode, reversed, mode);
2233 /* If one of the operands is a PLUS or a MINUS, see if we can
2234 simplify this by the associative law.
2235 Don't use the associative law for floating point.
2236 The inaccuracy makes it nonassociative,
2237 and subtle programs can break if operations are associated. */
2239 if (INTEGRAL_MODE_P (mode)
2240 && (plus_minus_operand_p (op0)
2241 || plus_minus_operand_p (op1))
2242 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2243 return tem;
2245 /* Reassociate floating point addition only when the user
2246 specifies associative math operations. */
2247 if (FLOAT_MODE_P (mode)
2248 && flag_associative_math)
2250 tem = simplify_associative_operation (code, mode, op0, op1);
2251 if (tem)
2252 return tem;
2254 break;
2256 case COMPARE:
2257 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2258 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2259 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2260 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2262 rtx xop00 = XEXP (op0, 0);
2263 rtx xop10 = XEXP (op1, 0);
2265 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2266 return xop00;
2268 if (REG_P (xop00) && REG_P (xop10)
2269 && GET_MODE (xop00) == GET_MODE (xop10)
2270 && REGNO (xop00) == REGNO (xop10)
2271 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2272 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2273 return xop00;
2275 break;
2277 case MINUS:
2278 /* We can't assume x-x is 0 even with non-IEEE floating point,
2279 but since it is zero except in very strange circumstances, we
2280 will treat it as zero with -ffinite-math-only. */
2281 if (rtx_equal_p (trueop0, trueop1)
2282 && ! side_effects_p (op0)
2283 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2284 return CONST0_RTX (mode);
2286 /* Change subtraction from zero into negation. (0 - x) is the
2287 same as -x when x is NaN, infinite, or finite and nonzero.
2288 But if the mode has signed zeros, and does not round towards
2289 -infinity, then 0 - 0 is 0, not -0. */
2290 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2291 return simplify_gen_unary (NEG, mode, op1, mode);
2293 /* (-1 - a) is ~a, unless the expression contains symbolic
2294 constants, in which case not retaining additions and
2295 subtractions could cause invalid assembly to be produced. */
2296 if (trueop0 == constm1_rtx
2297 && !contains_symbolic_reference_p (op1))
2298 return simplify_gen_unary (NOT, mode, op1, mode);
2300 /* Subtracting 0 has no effect unless the mode has signed zeros
2301 and supports rounding towards -infinity. In such a case,
2302 0 - 0 is -0. */
2303 if (!(HONOR_SIGNED_ZEROS (mode)
2304 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2305 && trueop1 == CONST0_RTX (mode))
2306 return op0;
2308 /* See if this is something like X * C - X or vice versa or
2309 if the multiplication is written as a shift. If so, we can
2310 distribute and make a new multiply, shift, or maybe just
2311 have X (if C is 2 in the example above). But don't make
2312 something more expensive than we had before. */
2314 if (SCALAR_INT_MODE_P (mode))
2316 rtx lhs = op0, rhs = op1;
2318 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2319 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2321 if (GET_CODE (lhs) == NEG)
2323 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2324 lhs = XEXP (lhs, 0);
2326 else if (GET_CODE (lhs) == MULT
2327 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2329 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2330 lhs = XEXP (lhs, 0);
2332 else if (GET_CODE (lhs) == ASHIFT
2333 && CONST_INT_P (XEXP (lhs, 1))
2334 && INTVAL (XEXP (lhs, 1)) >= 0
2335 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2337 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2338 GET_MODE_PRECISION (mode));
2339 lhs = XEXP (lhs, 0);
2342 if (GET_CODE (rhs) == NEG)
2344 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2345 rhs = XEXP (rhs, 0);
2347 else if (GET_CODE (rhs) == MULT
2348 && CONST_INT_P (XEXP (rhs, 1)))
2350 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2351 rhs = XEXP (rhs, 0);
2353 else if (GET_CODE (rhs) == ASHIFT
2354 && CONST_INT_P (XEXP (rhs, 1))
2355 && INTVAL (XEXP (rhs, 1)) >= 0
2356 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2358 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2359 GET_MODE_PRECISION (mode));
2360 negcoeff1 = -negcoeff1;
2361 rhs = XEXP (rhs, 0);
2364 if (rtx_equal_p (lhs, rhs))
2366 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2367 rtx coeff;
2368 bool speed = optimize_function_for_speed_p (cfun);
2370 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2372 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2373 return (set_src_cost (tem, mode, speed)
2374 <= set_src_cost (orig, mode, speed) ? tem : 0);
2378 /* (a - (-b)) -> (a + b). True even for IEEE. */
2379 if (GET_CODE (op1) == NEG)
2380 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2382 /* (-x - c) may be simplified as (-c - x). */
2383 if (GET_CODE (op0) == NEG
2384 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2386 tem = simplify_unary_operation (NEG, mode, op1, mode);
2387 if (tem)
2388 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2391 /* Don't let a relocatable value get a negative coeff. */
2392 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2393 return simplify_gen_binary (PLUS, mode,
2394 op0,
2395 neg_const_int (mode, op1));
2397 /* (x - (x & y)) -> (x & ~y) */
2398 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2400 if (rtx_equal_p (op0, XEXP (op1, 0)))
2402 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2403 GET_MODE (XEXP (op1, 1)));
2404 return simplify_gen_binary (AND, mode, op0, tem);
2406 if (rtx_equal_p (op0, XEXP (op1, 1)))
2408 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2409 GET_MODE (XEXP (op1, 0)));
2410 return simplify_gen_binary (AND, mode, op0, tem);
2414 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2415 by reversing the comparison code if valid. */
2416 if (STORE_FLAG_VALUE == 1
2417 && trueop0 == const1_rtx
2418 && COMPARISON_P (op1)
2419 && (reversed = reversed_comparison (op1, mode)))
2420 return reversed;
2422 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2423 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2424 && GET_CODE (op1) == MULT
2425 && GET_CODE (XEXP (op1, 0)) == NEG)
2427 rtx in1, in2;
2429 in1 = XEXP (XEXP (op1, 0), 0);
2430 in2 = XEXP (op1, 1);
2431 return simplify_gen_binary (PLUS, mode,
2432 simplify_gen_binary (MULT, mode,
2433 in1, in2),
2434 op0);
2437 /* Canonicalize (minus (neg A) (mult B C)) to
2438 (minus (mult (neg B) C) A). */
2439 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2440 && GET_CODE (op1) == MULT
2441 && GET_CODE (op0) == NEG)
2443 rtx in1, in2;
2445 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2446 in2 = XEXP (op1, 1);
2447 return simplify_gen_binary (MINUS, mode,
2448 simplify_gen_binary (MULT, mode,
2449 in1, in2),
2450 XEXP (op0, 0));
2453 /* If one of the operands is a PLUS or a MINUS, see if we can
2454 simplify this by the associative law. This will, for example,
2455 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2456 Don't use the associative law for floating point.
2457 The inaccuracy makes it nonassociative,
2458 and subtle programs can break if operations are associated. */
2460 if (INTEGRAL_MODE_P (mode)
2461 && (plus_minus_operand_p (op0)
2462 || plus_minus_operand_p (op1))
2463 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2464 return tem;
2465 break;
2467 case MULT:
2468 if (trueop1 == constm1_rtx)
2469 return simplify_gen_unary (NEG, mode, op0, mode);
2471 if (GET_CODE (op0) == NEG)
2473 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2474 /* If op1 is a MULT as well and simplify_unary_operation
2475 just moved the NEG to the second operand, simplify_gen_binary
2476 below could, through simplify_associative_operation, move
2477 the NEG around again and recurse endlessly. */
2478 if (temp
2479 && GET_CODE (op1) == MULT
2480 && GET_CODE (temp) == MULT
2481 && XEXP (op1, 0) == XEXP (temp, 0)
2482 && GET_CODE (XEXP (temp, 1)) == NEG
2483 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2484 temp = NULL_RTX;
2485 if (temp)
2486 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2488 if (GET_CODE (op1) == NEG)
2490 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2491 /* If op0 is a MULT as well and simplify_unary_operation
2492 just moved the NEG to the second operand, simplify_gen_binary
2493 below could, through simplify_associative_operation, move
2494 the NEG around again and recurse endlessly. */
2495 if (temp
2496 && GET_CODE (op0) == MULT
2497 && GET_CODE (temp) == MULT
2498 && XEXP (op0, 0) == XEXP (temp, 0)
2499 && GET_CODE (XEXP (temp, 1)) == NEG
2500 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2501 temp = NULL_RTX;
2502 if (temp)
2503 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2506 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2507 x is NaN, since x * 0 is then also NaN. Nor is it valid
2508 when the mode has signed zeros, since multiplying a negative
2509 number by 0 will give -0, not 0. */
2510 if (!HONOR_NANS (mode)
2511 && !HONOR_SIGNED_ZEROS (mode)
2512 && trueop1 == CONST0_RTX (mode)
2513 && ! side_effects_p (op0))
2514 return op1;
2516 /* In IEEE floating point, x*1 is not equivalent to x for
2517 signalling NaNs. */
2518 if (!HONOR_SNANS (mode)
2519 && trueop1 == CONST1_RTX (mode))
2520 return op0;
2522 /* Convert multiply by constant power of two into shift. */
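/* E.g. (mult:SI r (const_int 8)) becomes (ashift:SI r (const_int 3)). */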
2523 if (CONST_SCALAR_INT_P (trueop1))
2525 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2526 if (val >= 0)
2527 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2530 /* x*2 is x+x and x*(-1) is -x */
2531 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2532 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2533 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2534 && GET_MODE (op0) == mode)
2536 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2538 if (real_equal (d1, &dconst2))
2539 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2541 if (!HONOR_SNANS (mode)
2542 && real_equal (d1, &dconstm1))
2543 return simplify_gen_unary (NEG, mode, op0, mode);
2546 /* Optimize -x * -x as x * x. */
2547 if (FLOAT_MODE_P (mode)
2548 && GET_CODE (op0) == NEG
2549 && GET_CODE (op1) == NEG
2550 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2551 && !side_effects_p (XEXP (op0, 0)))
2552 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2554 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2555 if (SCALAR_FLOAT_MODE_P (mode)
2556 && GET_CODE (op0) == ABS
2557 && GET_CODE (op1) == ABS
2558 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2559 && !side_effects_p (XEXP (op0, 0)))
2560 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2562 /* Reassociate multiplication, but for floating point MULTs
2563 only when the user specifies unsafe math optimizations. */
2564 if (! FLOAT_MODE_P (mode)
2565 || flag_unsafe_math_optimizations)
2567 tem = simplify_associative_operation (code, mode, op0, op1);
2568 if (tem)
2569 return tem;
2571 break;
2573 case IOR:
2574 if (trueop1 == CONST0_RTX (mode))
2575 return op0;
2576 if (INTEGRAL_MODE_P (mode)
2577 && trueop1 == CONSTM1_RTX (mode)
2578 && !side_effects_p (op0))
2579 return op1;
2580 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2581 return op0;
2582 /* A | (~A) -> -1 */
2583 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2584 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2585 && ! side_effects_p (op0)
2586 && SCALAR_INT_MODE_P (mode))
2587 return constm1_rtx;
2589 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2590 if (CONST_INT_P (op1)
2591 && HWI_COMPUTABLE_MODE_P (mode)
2592 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2593 && !side_effects_p (op0))
2594 return op1;
2596 /* Canonicalize (X & C1) | C2. */
2597 if (GET_CODE (op0) == AND
2598 && CONST_INT_P (trueop1)
2599 && CONST_INT_P (XEXP (op0, 1)))
2601 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2602 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2603 HOST_WIDE_INT c2 = INTVAL (trueop1);
2605 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2606 if ((c1 & c2) == c1
2607 && !side_effects_p (XEXP (op0, 0)))
2608 return trueop1;
2610 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2611 if (((c1|c2) & mask) == mask)
2612 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2614 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2615 if (((c1 & ~c2) & mask) != (c1 & mask))
2617 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2618 gen_int_mode (c1 & ~c2, mode));
2619 return simplify_gen_binary (IOR, mode, tem, op1);
2623 /* Convert (A & B) | A to A. */
2624 if (GET_CODE (op0) == AND
2625 && (rtx_equal_p (XEXP (op0, 0), op1)
2626 || rtx_equal_p (XEXP (op0, 1), op1))
2627 && ! side_effects_p (XEXP (op0, 0))
2628 && ! side_effects_p (XEXP (op0, 1)))
2629 return op1;
2631 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2632 mode size to (rotate A CX). */
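/* E.g. in SImode, (ior (ashift:SI r (const_int 8)) (lshiftrt:SI r (const_int 24)))
   becomes (rotate:SI r (const_int 8)), since 8 + 24 equals the mode precision. */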
2634 if (GET_CODE (op1) == ASHIFT
2635 || GET_CODE (op1) == SUBREG)
2637 opleft = op1;
2638 opright = op0;
2640 else
2642 opright = op1;
2643 opleft = op0;
2646 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2647 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2648 && CONST_INT_P (XEXP (opleft, 1))
2649 && CONST_INT_P (XEXP (opright, 1))
2650 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2651 == GET_MODE_PRECISION (mode)))
2652 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2654 /* Same, but for ashift that has been "simplified" to a wider mode
2655 by simplify_shift_const. */
2657 if (GET_CODE (opleft) == SUBREG
2658 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2659 && GET_CODE (opright) == LSHIFTRT
2660 && GET_CODE (XEXP (opright, 0)) == SUBREG
2661 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2662 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2663 && (GET_MODE_SIZE (GET_MODE (opleft))
2664 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2665 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2666 SUBREG_REG (XEXP (opright, 0)))
2667 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2668 && CONST_INT_P (XEXP (opright, 1))
2669 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2670 == GET_MODE_PRECISION (mode)))
2671 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2672 XEXP (SUBREG_REG (opleft), 1));
2674 /* If we have (ior (and X C1) C2), simplify this by making
2675 C1 as small as possible if C1 actually changes. */
2676 if (CONST_INT_P (op1)
2677 && (HWI_COMPUTABLE_MODE_P (mode)
2678 || INTVAL (op1) > 0)
2679 && GET_CODE (op0) == AND
2680 && CONST_INT_P (XEXP (op0, 1))
2681 && CONST_INT_P (op1)
2682 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2684 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2685 gen_int_mode (UINTVAL (XEXP (op0, 1))
2686 & ~UINTVAL (op1),
2687 mode));
2688 return simplify_gen_binary (IOR, mode, tmp, op1);
2691 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2692 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2693 PLUS does not affect any of the bits in OP1, we can do the
2694 IOR as a PLUS and then associate. This is valid if OP1
2695 can be safely shifted left C bits. */
2696 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2697 && GET_CODE (XEXP (op0, 0)) == PLUS
2698 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2699 && CONST_INT_P (XEXP (op0, 1))
2700 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2702 int count = INTVAL (XEXP (op0, 1));
2703 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2705 if (mask >> count == INTVAL (trueop1)
2706 && trunc_int_for_mode (mask, mode) == mask
2707 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2708 return simplify_gen_binary (ASHIFTRT, mode,
2709 plus_constant (mode, XEXP (op0, 0),
2710 mask),
2711 XEXP (op0, 1));
2714 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2715 if (tem)
2716 return tem;
2718 tem = simplify_associative_operation (code, mode, op0, op1);
2719 if (tem)
2720 return tem;
2721 break;
2723 case XOR:
2724 if (trueop1 == CONST0_RTX (mode))
2725 return op0;
2726 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2727 return simplify_gen_unary (NOT, mode, op0, mode);
2728 if (rtx_equal_p (trueop0, trueop1)
2729 && ! side_effects_p (op0)
2730 && GET_MODE_CLASS (mode) != MODE_CC)
2731 return CONST0_RTX (mode);
2733 /* Canonicalize XOR of the most significant bit to PLUS. */
2734 if (CONST_SCALAR_INT_P (op1)
2735 && mode_signbit_p (mode, op1))
2736 return simplify_gen_binary (PLUS, mode, op0, op1);
2737 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2738 if (CONST_SCALAR_INT_P (op1)
2739 && GET_CODE (op0) == PLUS
2740 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2741 && mode_signbit_p (mode, XEXP (op0, 1)))
2742 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2743 simplify_gen_binary (XOR, mode, op1,
2744 XEXP (op0, 1)));
2746 /* If we are XORing two things that have no bits in common,
2747 convert them into an IOR. This helps to detect rotation encoded
2748 using those methods and possibly other simplifications. */
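/* For instance, in SImode (xor (and r (const_int 0xf0)) (const_int 0x0f)) has no
   overlapping nonzero bits, so it can be rewritten as
   (ior (and r (const_int 0xf0)) (const_int 0x0f)). */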
2750 if (HWI_COMPUTABLE_MODE_P (mode)
2751 && (nonzero_bits (op0, mode)
2752 & nonzero_bits (op1, mode)) == 0)
2753 return (simplify_gen_binary (IOR, mode, op0, op1));
2755 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2756 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2757 (NOT y). */
2759 int num_negated = 0;
2761 if (GET_CODE (op0) == NOT)
2762 num_negated++, op0 = XEXP (op0, 0);
2763 if (GET_CODE (op1) == NOT)
2764 num_negated++, op1 = XEXP (op1, 0);
2766 if (num_negated == 2)
2767 return simplify_gen_binary (XOR, mode, op0, op1);
2768 else if (num_negated == 1)
2769 return simplify_gen_unary (NOT, mode,
2770 simplify_gen_binary (XOR, mode, op0, op1),
2771 mode);
2774 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2775 correspond to a machine insn or result in further simplifications
2776 if B is a constant. */
2778 if (GET_CODE (op0) == AND
2779 && rtx_equal_p (XEXP (op0, 1), op1)
2780 && ! side_effects_p (op1))
2781 return simplify_gen_binary (AND, mode,
2782 simplify_gen_unary (NOT, mode,
2783 XEXP (op0, 0), mode),
2784 op1);
2786 else if (GET_CODE (op0) == AND
2787 && rtx_equal_p (XEXP (op0, 0), op1)
2788 && ! side_effects_p (op1))
2789 return simplify_gen_binary (AND, mode,
2790 simplify_gen_unary (NOT, mode,
2791 XEXP (op0, 1), mode),
2792 op1);
2794 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2795 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2796 out bits inverted twice and not set by C. Similarly, given
2797 (xor (and (xor A B) C) D), simplify without inverting C in
2798 the xor operand: (xor (and A C) (B&C)^D). */
2800 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2801 && GET_CODE (XEXP (op0, 0)) == XOR
2802 && CONST_INT_P (op1)
2803 && CONST_INT_P (XEXP (op0, 1))
2804 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2806 enum rtx_code op = GET_CODE (op0);
2807 rtx a = XEXP (XEXP (op0, 0), 0);
2808 rtx b = XEXP (XEXP (op0, 0), 1);
2809 rtx c = XEXP (op0, 1);
2810 rtx d = op1;
2811 HOST_WIDE_INT bval = INTVAL (b);
2812 HOST_WIDE_INT cval = INTVAL (c);
2813 HOST_WIDE_INT dval = INTVAL (d);
2814 HOST_WIDE_INT xcval;
2816 if (op == IOR)
2817 xcval = ~cval;
2818 else
2819 xcval = cval;
2821 return simplify_gen_binary (XOR, mode,
2822 simplify_gen_binary (op, mode, a, c),
2823 gen_int_mode ((bval & xcval) ^ dval,
2824 mode));
2827 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2828 we can transform like this:
2829 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2830 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2831 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2832 Attempt a few simplifications when B and C are both constants. */
2833 if (GET_CODE (op0) == AND
2834 && CONST_INT_P (op1)
2835 && CONST_INT_P (XEXP (op0, 1)))
2837 rtx a = XEXP (op0, 0);
2838 rtx b = XEXP (op0, 1);
2839 rtx c = op1;
2840 HOST_WIDE_INT bval = INTVAL (b);
2841 HOST_WIDE_INT cval = INTVAL (c);
2843 /* Instead of computing ~A&C, we compute its negated value,
2844 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2845 optimize for sure. If it does not simplify, we still try
2846 to compute ~A&C below, but since that always allocates
2847 RTL, we don't try that before committing to returning a
2848 simplified expression. */
2849 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2850 GEN_INT (~cval));
2852 if ((~cval & bval) == 0)
2854 rtx na_c = NULL_RTX;
2855 if (n_na_c)
2856 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2857 else
2859 /* If ~A does not simplify, don't bother: we don't
2860 want to simplify 2 operations into 3, and if na_c
2861 were to simplify with na, n_na_c would have
2862 simplified as well. */
2863 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2864 if (na)
2865 na_c = simplify_gen_binary (AND, mode, na, c);
2868 /* Try to simplify ~A&C | ~B&C. */
2869 if (na_c != NULL_RTX)
2870 return simplify_gen_binary (IOR, mode, na_c,
2871 gen_int_mode (~bval & cval, mode));
2873 else
2875 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2876 if (n_na_c == CONSTM1_RTX (mode))
2878 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2879 gen_int_mode (~cval & bval,
2880 mode));
2881 return simplify_gen_binary (IOR, mode, a_nc_b,
2882 gen_int_mode (~bval & cval,
2883 mode));
2888 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2889 comparison if STORE_FLAG_VALUE is 1. */
2890 if (STORE_FLAG_VALUE == 1
2891 && trueop1 == const1_rtx
2892 && COMPARISON_P (op0)
2893 && (reversed = reversed_comparison (op0, mode)))
2894 return reversed;
2896 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2897 is (lt foo (const_int 0)), so we can perform the above
2898 simplification if STORE_FLAG_VALUE is 1. */
2900 if (STORE_FLAG_VALUE == 1
2901 && trueop1 == const1_rtx
2902 && GET_CODE (op0) == LSHIFTRT
2903 && CONST_INT_P (XEXP (op0, 1))
2904 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2905 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2907 /* (xor (comparison foo bar) (const_int sign-bit))
2908 when STORE_FLAG_VALUE is the sign bit. */
2909 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2910 && trueop1 == const_true_rtx
2911 && COMPARISON_P (op0)
2912 && (reversed = reversed_comparison (op0, mode)))
2913 return reversed;
2915 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2916 if (tem)
2917 return tem;
2919 tem = simplify_associative_operation (code, mode, op0, op1);
2920 if (tem)
2921 return tem;
2922 break;
2924 case AND:
2925 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2926 return trueop1;
2927 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2928 return op0;
2929 if (HWI_COMPUTABLE_MODE_P (mode))
2931 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2932 HOST_WIDE_INT nzop1;
2933 if (CONST_INT_P (trueop1))
2935 HOST_WIDE_INT val1 = INTVAL (trueop1);
2936 /* If we are turning off bits already known off in OP0, we need
2937 not do an AND. */
2938 if ((nzop0 & ~val1) == 0)
2939 return op0;
2941 nzop1 = nonzero_bits (trueop1, mode);
2942 /* If we are clearing all the nonzero bits, the result is zero. */
2943 if ((nzop1 & nzop0) == 0
2944 && !side_effects_p (op0) && !side_effects_p (op1))
2945 return CONST0_RTX (mode);
2947 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2948 && GET_MODE_CLASS (mode) != MODE_CC)
2949 return op0;
2950 /* A & (~A) -> 0 */
2951 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2952 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2953 && ! side_effects_p (op0)
2954 && GET_MODE_CLASS (mode) != MODE_CC)
2955 return CONST0_RTX (mode);
2957 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2958 there are no nonzero bits of C outside of X's mode. */
2959 if ((GET_CODE (op0) == SIGN_EXTEND
2960 || GET_CODE (op0) == ZERO_EXTEND)
2961 && CONST_INT_P (trueop1)
2962 && HWI_COMPUTABLE_MODE_P (mode)
2963 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2964 & UINTVAL (trueop1)) == 0)
2966 machine_mode imode = GET_MODE (XEXP (op0, 0));
2967 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2968 gen_int_mode (INTVAL (trueop1),
2969 imode));
2970 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2973 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2974 we might be able to further simplify the AND with X and potentially
2975 remove the truncation altogether. */
2976 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2978 rtx x = XEXP (op0, 0);
2979 machine_mode xmode = GET_MODE (x);
2980 tem = simplify_gen_binary (AND, xmode, x,
2981 gen_int_mode (INTVAL (trueop1), xmode));
2982 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2985 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2986 if (GET_CODE (op0) == IOR
2987 && CONST_INT_P (trueop1)
2988 && CONST_INT_P (XEXP (op0, 1)))
2990 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2991 return simplify_gen_binary (IOR, mode,
2992 simplify_gen_binary (AND, mode,
2993 XEXP (op0, 0), op1),
2994 gen_int_mode (tmp, mode));
2997 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2998 insn (and may simplify more). */
2999 if (GET_CODE (op0) == XOR
3000 && rtx_equal_p (XEXP (op0, 0), op1)
3001 && ! side_effects_p (op1))
3002 return simplify_gen_binary (AND, mode,
3003 simplify_gen_unary (NOT, mode,
3004 XEXP (op0, 1), mode),
3005 op1);
3007 if (GET_CODE (op0) == XOR
3008 && rtx_equal_p (XEXP (op0, 1), op1)
3009 && ! side_effects_p (op1))
3010 return simplify_gen_binary (AND, mode,
3011 simplify_gen_unary (NOT, mode,
3012 XEXP (op0, 0), mode),
3013 op1);
3015 /* Similarly for (~(A ^ B)) & A. */
3016 if (GET_CODE (op0) == NOT
3017 && GET_CODE (XEXP (op0, 0)) == XOR
3018 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3019 && ! side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3022 if (GET_CODE (op0) == NOT
3023 && GET_CODE (XEXP (op0, 0)) == XOR
3024 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3025 && ! side_effects_p (op1))
3026 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3028 /* Convert (A | B) & A to A. */
3029 if (GET_CODE (op0) == IOR
3030 && (rtx_equal_p (XEXP (op0, 0), op1)
3031 || rtx_equal_p (XEXP (op0, 1), op1))
3032 && ! side_effects_p (XEXP (op0, 0))
3033 && ! side_effects_p (XEXP (op0, 1)))
3034 return op1;
3036 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3037 ((A & N) + B) & M -> (A + B) & M
3038 Similarly if (N & M) == 0,
3039 ((A | N) + B) & M -> (A + B) & M
3040 and for - instead of + and/or ^ instead of |.
3041 Also, if (N & M) == 0, then
3042 (A +- N) & M -> A & M. */
3043 if (CONST_INT_P (trueop1)
3044 && HWI_COMPUTABLE_MODE_P (mode)
3045 && ~UINTVAL (trueop1)
3046 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3047 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3049 rtx pmop[2];
3050 int which;
3052 pmop[0] = XEXP (op0, 0);
3053 pmop[1] = XEXP (op0, 1);
3055 if (CONST_INT_P (pmop[1])
3056 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3057 return simplify_gen_binary (AND, mode, pmop[0], op1);
3059 for (which = 0; which < 2; which++)
3061 tem = pmop[which];
3062 switch (GET_CODE (tem))
3064 case AND:
3065 if (CONST_INT_P (XEXP (tem, 1))
3066 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3067 == UINTVAL (trueop1))
3068 pmop[which] = XEXP (tem, 0);
3069 break;
3070 case IOR:
3071 case XOR:
3072 if (CONST_INT_P (XEXP (tem, 1))
3073 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3074 pmop[which] = XEXP (tem, 0);
3075 break;
3076 default:
3077 break;
3081 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3083 tem = simplify_gen_binary (GET_CODE (op0), mode,
3084 pmop[0], pmop[1]);
3085 return simplify_gen_binary (code, mode, tem, op1);
3089 /* (and X (ior (not X) Y)) -> (and X Y) */
3090 if (GET_CODE (op1) == IOR
3091 && GET_CODE (XEXP (op1, 0)) == NOT
3092 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3093 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3095 /* (and (ior (not X) Y) X) -> (and X Y) */
3096 if (GET_CODE (op0) == IOR
3097 && GET_CODE (XEXP (op0, 0)) == NOT
3098 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3099 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3101 /* (and X (ior Y (not X))) -> (and X Y) */
3102 if (GET_CODE (op1) == IOR
3103 && GET_CODE (XEXP (op1, 1)) == NOT
3104 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3105 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3107 /* (and (ior Y (not X)) X) -> (and X Y) */
3108 if (GET_CODE (op0) == IOR
3109 && GET_CODE (XEXP (op0, 1)) == NOT
3110 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3111 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3113 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3114 if (tem)
3115 return tem;
3117 tem = simplify_associative_operation (code, mode, op0, op1);
3118 if (tem)
3119 return tem;
3120 break;
3122 case UDIV:
3123 /* 0/x is 0 (or x&0 if x has side-effects). */
3124 if (trueop0 == CONST0_RTX (mode))
3126 if (side_effects_p (op1))
3127 return simplify_gen_binary (AND, mode, op1, trueop0);
3128 return trueop0;
3130 /* x/1 is x. */
3131 if (trueop1 == CONST1_RTX (mode))
3133 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3134 if (tem)
3135 return tem;
3137 /* Convert divide by power of two into shift. */
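/* E.g. (udiv:SI r (const_int 16)) becomes (lshiftrt:SI r (const_int 4)). */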
3138 if (CONST_INT_P (trueop1)
3139 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3140 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3141 break;
3143 case DIV:
3144 /* Handle floating point and integers separately. */
3145 if (SCALAR_FLOAT_MODE_P (mode))
3147 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3148 safe for modes with NaNs, since 0.0 / 0.0 will then be
3149 NaN rather than 0.0. Nor is it safe for modes with signed
3150 zeros, since dividing 0 by a negative number gives -0.0 */
3151 if (trueop0 == CONST0_RTX (mode)
3152 && !HONOR_NANS (mode)
3153 && !HONOR_SIGNED_ZEROS (mode)
3154 && ! side_effects_p (op1))
3155 return op0;
3156 /* x/1.0 is x. */
3157 if (trueop1 == CONST1_RTX (mode)
3158 && !HONOR_SNANS (mode))
3159 return op0;
3161 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3162 && trueop1 != CONST0_RTX (mode))
3164 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3166 /* x/-1.0 is -x. */
3167 if (real_equal (d1, &dconstm1)
3168 && !HONOR_SNANS (mode))
3169 return simplify_gen_unary (NEG, mode, op0, mode);
3171 /* Change FP division by a constant into multiplication.
3172 Only do this with -freciprocal-math. */
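/* E.g. with -freciprocal-math, (div:DF r (const_double:DF 4.0)) can become
   (mult:DF r (const_double:DF 0.25)); 0.25 is exactly representable, so this
   particular case loses no accuracy. */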
3173 if (flag_reciprocal_math
3174 && !real_equal (d1, &dconst0))
3176 REAL_VALUE_TYPE d;
3177 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3178 tem = const_double_from_real_value (d, mode);
3179 return simplify_gen_binary (MULT, mode, op0, tem);
3183 else if (SCALAR_INT_MODE_P (mode))
3185 /* 0/x is 0 (or x&0 if x has side-effects). */
3186 if (trueop0 == CONST0_RTX (mode)
3187 && !cfun->can_throw_non_call_exceptions)
3189 if (side_effects_p (op1))
3190 return simplify_gen_binary (AND, mode, op1, trueop0);
3191 return trueop0;
3193 /* x/1 is x. */
3194 if (trueop1 == CONST1_RTX (mode))
3196 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3197 if (tem)
3198 return tem;
3200 /* x/-1 is -x. */
3201 if (trueop1 == constm1_rtx)
3203 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3204 if (x)
3205 return simplify_gen_unary (NEG, mode, x, mode);
3208 break;
3210 case UMOD:
3211 /* 0%x is 0 (or x&0 if x has side-effects). */
3212 if (trueop0 == CONST0_RTX (mode))
3214 if (side_effects_p (op1))
3215 return simplify_gen_binary (AND, mode, op1, trueop0);
3216 return trueop0;
3218 /* x%1 is 0 (or x&0 if x has side-effects). */
3219 if (trueop1 == CONST1_RTX (mode))
3221 if (side_effects_p (op0))
3222 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3223 return CONST0_RTX (mode);
3225 /* Implement modulus by power of two as AND. */
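/* E.g. (umod:SI r (const_int 8)) becomes (and:SI r (const_int 7)). */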
3226 if (CONST_INT_P (trueop1)
3227 && exact_log2 (UINTVAL (trueop1)) > 0)
3228 return simplify_gen_binary (AND, mode, op0,
3229 gen_int_mode (INTVAL (op1) - 1, mode));
3230 break;
3232 case MOD:
3233 /* 0%x is 0 (or x&0 if x has side-effects). */
3234 if (trueop0 == CONST0_RTX (mode))
3236 if (side_effects_p (op1))
3237 return simplify_gen_binary (AND, mode, op1, trueop0);
3238 return trueop0;
3240 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3241 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3243 if (side_effects_p (op0))
3244 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3245 return CONST0_RTX (mode);
3247 break;
3249 case ROTATERT:
3250 case ROTATE:
3251 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3252 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3253 bitsize - 1, use the other rotate direction with an amount of
3254 1 .. bitsize / 2 - 1 instead. */
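/* For example, in SImode (32-bit precision), and on targets providing both rotate
   patterns, (rotate:SI r (const_int 27)) is canonicalized to
   (rotatert:SI r (const_int 5)). */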
3255 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3256 if (CONST_INT_P (trueop1)
3257 && IN_RANGE (INTVAL (trueop1),
3258 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3259 GET_MODE_PRECISION (mode) - 1))
3260 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3261 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3262 - INTVAL (trueop1)));
3263 #endif
3264 /* FALLTHRU */
3265 case ASHIFTRT:
3266 if (trueop1 == CONST0_RTX (mode))
3267 return op0;
3268 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3269 return op0;
3270 /* Rotating ~0 always results in ~0. */
3271 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3272 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3273 && ! side_effects_p (op1))
3274 return op0;
3275 /* Given:
3276 scalar modes M1, M2
3277 scalar constants c1, c2
3278 size (M2) > size (M1)
3279 c1 == size (M2) - size (M1)
3280 optimize:
3281 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3282 <low_part>)
3283 (const_int <c2>))
3285 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3286 <low_part>). */
3287 if (code == ASHIFTRT
3288 && !VECTOR_MODE_P (mode)
3289 && SUBREG_P (op0)
3290 && CONST_INT_P (op1)
3291 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3292 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3293 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3294 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3295 > GET_MODE_BITSIZE (mode))
3296 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3297 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3298 - GET_MODE_BITSIZE (mode)))
3299 && subreg_lowpart_p (op0))
3301 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3302 + INTVAL (op1));
3303 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3304 tmp = simplify_gen_binary (ASHIFTRT,
3305 GET_MODE (SUBREG_REG (op0)),
3306 XEXP (SUBREG_REG (op0), 0),
3307 tmp);
3308 return lowpart_subreg (mode, tmp, inner_mode);
3310 canonicalize_shift:
3311 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3313 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3314 if (val != INTVAL (op1))
3315 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3317 break;
3319 case ASHIFT:
3320 case SS_ASHIFT:
3321 case US_ASHIFT:
3322 if (trueop1 == CONST0_RTX (mode))
3323 return op0;
3324 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3325 return op0;
3326 goto canonicalize_shift;
3328 case LSHIFTRT:
3329 if (trueop1 == CONST0_RTX (mode))
3330 return op0;
3331 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3332 return op0;
3333 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3334 if (GET_CODE (op0) == CLZ
3335 && CONST_INT_P (trueop1)
3336 && STORE_FLAG_VALUE == 1
3337 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3339 machine_mode imode = GET_MODE (XEXP (op0, 0));
3340 unsigned HOST_WIDE_INT zero_val = 0;
3342 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3343 && zero_val == GET_MODE_PRECISION (imode)
3344 && INTVAL (trueop1) == exact_log2 (zero_val))
3345 return simplify_gen_relational (EQ, mode, imode,
3346 XEXP (op0, 0), const0_rtx);
3348 goto canonicalize_shift;
3350 case SMIN:
3351 if (width <= HOST_BITS_PER_WIDE_INT
3352 && mode_signbit_p (mode, trueop1)
3353 && ! side_effects_p (op0))
3354 return op1;
3355 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3356 return op0;
3357 tem = simplify_associative_operation (code, mode, op0, op1);
3358 if (tem)
3359 return tem;
3360 break;
3362 case SMAX:
3363 if (width <= HOST_BITS_PER_WIDE_INT
3364 && CONST_INT_P (trueop1)
3365 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3366 && ! side_effects_p (op0))
3367 return op1;
3368 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3369 return op0;
3370 tem = simplify_associative_operation (code, mode, op0, op1);
3371 if (tem)
3372 return tem;
3373 break;
3375 case UMIN:
3376 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3377 return op1;
3378 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3379 return op0;
3380 tem = simplify_associative_operation (code, mode, op0, op1);
3381 if (tem)
3382 return tem;
3383 break;
3385 case UMAX:
3386 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3387 return op1;
3388 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3389 return op0;
3390 tem = simplify_associative_operation (code, mode, op0, op1);
3391 if (tem)
3392 return tem;
3393 break;
3395 case SS_PLUS:
3396 case US_PLUS:
3397 case SS_MINUS:
3398 case US_MINUS:
3399 case SS_MULT:
3400 case US_MULT:
3401 case SS_DIV:
3402 case US_DIV:
3403 /* ??? There are simplifications that can be done. */
3404 return 0;
3406 case VEC_SELECT:
3407 if (!VECTOR_MODE_P (mode))
3409 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3410 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3411 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3412 gcc_assert (XVECLEN (trueop1, 0) == 1);
3413 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3415 if (GET_CODE (trueop0) == CONST_VECTOR)
3416 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3417 (trueop1, 0, 0)));
3419 /* Extract a scalar element from a nested VEC_SELECT expression
3420 (with an optional nested VEC_CONCAT expression). Some targets
3421 (i386) extract a scalar element from a vector using a chain of
3422 nested VEC_SELECT expressions. When the input operand is a
3423 memory operand, this operation can be simplified to a simple
3424 scalar load from an offset memory address. */
3425 if (GET_CODE (trueop0) == VEC_SELECT)
3427 rtx op0 = XEXP (trueop0, 0);
3428 rtx op1 = XEXP (trueop0, 1);
3430 machine_mode opmode = GET_MODE (op0);
3431 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3432 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3434 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3435 int elem;
3437 rtvec vec;
3438 rtx tmp_op, tmp;
3440 gcc_assert (GET_CODE (op1) == PARALLEL);
3441 gcc_assert (i < n_elts);
3443 /* Select the element pointed to by the nested selector. */
3444 elem = INTVAL (XVECEXP (op1, 0, i));
3446 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3447 if (GET_CODE (op0) == VEC_CONCAT)
3449 rtx op00 = XEXP (op0, 0);
3450 rtx op01 = XEXP (op0, 1);
3452 machine_mode mode00, mode01;
3453 int n_elts00, n_elts01;
3455 mode00 = GET_MODE (op00);
3456 mode01 = GET_MODE (op01);
3458 /* Find out the number of elements in each operand. */
3459 if (VECTOR_MODE_P (mode00))
3461 elt_size = GET_MODE_UNIT_SIZE (mode00);
3462 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3464 else
3465 n_elts00 = 1;
3467 if (VECTOR_MODE_P (mode01))
3469 elt_size = GET_MODE_UNIT_SIZE (mode01);
3470 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3472 else
3473 n_elts01 = 1;
3475 gcc_assert (n_elts == n_elts00 + n_elts01);
3477 /* Select correct operand of VEC_CONCAT
3478 and adjust selector. */
3479 if (elem < n_elts01)
3480 tmp_op = op00;
3481 else
3483 tmp_op = op01;
3484 elem -= n_elts00;
3487 else
3488 tmp_op = op0;
3490 vec = rtvec_alloc (1);
3491 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3493 tmp = gen_rtx_fmt_ee (code, mode,
3494 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3495 return tmp;
3497 if (GET_CODE (trueop0) == VEC_DUPLICATE
3498 && GET_MODE (XEXP (trueop0, 0)) == mode)
3499 return XEXP (trueop0, 0);
3501 else
3503 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3504 gcc_assert (GET_MODE_INNER (mode)
3505 == GET_MODE_INNER (GET_MODE (trueop0)));
3506 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3508 if (GET_CODE (trueop0) == CONST_VECTOR)
3510 int elt_size = GET_MODE_UNIT_SIZE (mode);
3511 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3512 rtvec v = rtvec_alloc (n_elts);
3513 unsigned int i;
3515 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3516 for (i = 0; i < n_elts; i++)
3518 rtx x = XVECEXP (trueop1, 0, i);
3520 gcc_assert (CONST_INT_P (x));
3521 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3522 INTVAL (x));
3525 return gen_rtx_CONST_VECTOR (mode, v);
3528 /* Recognize the identity. */
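/* E.g. (vec_select:V4SI x (parallel [(const_int 0) (const_int 1) (const_int 2)
   (const_int 3)])) with x of mode V4SI is just x. */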
3529 if (GET_MODE (trueop0) == mode)
3531 bool maybe_ident = true;
3532 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3534 rtx j = XVECEXP (trueop1, 0, i);
3535 if (!CONST_INT_P (j) || INTVAL (j) != i)
3537 maybe_ident = false;
3538 break;
3541 if (maybe_ident)
3542 return trueop0;
3545 /* If we build {a,b} then permute it, build the result directly. */
3546 if (XVECLEN (trueop1, 0) == 2
3547 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3548 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3549 && GET_CODE (trueop0) == VEC_CONCAT
3550 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3551 && GET_MODE (XEXP (trueop0, 0)) == mode
3552 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3553 && GET_MODE (XEXP (trueop0, 1)) == mode)
3555 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3556 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3557 rtx subop0, subop1;
3559 gcc_assert (i0 < 4 && i1 < 4);
3560 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3561 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3563 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3566 if (XVECLEN (trueop1, 0) == 2
3567 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3568 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3569 && GET_CODE (trueop0) == VEC_CONCAT
3570 && GET_MODE (trueop0) == mode)
3572 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3573 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3574 rtx subop0, subop1;
3576 gcc_assert (i0 < 2 && i1 < 2);
3577 subop0 = XEXP (trueop0, i0);
3578 subop1 = XEXP (trueop0, i1);
3580 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3583 /* If we select one half of a vec_concat, return that. */
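/* E.g. (vec_select:V2SI (vec_concat:V4SI x y) (parallel [(const_int 0) (const_int 1)]))
   with x and y of mode V2SI returns x; selecting indices 2 and 3 would return y. */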
3584 if (GET_CODE (trueop0) == VEC_CONCAT
3585 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3587 rtx subop0 = XEXP (trueop0, 0);
3588 rtx subop1 = XEXP (trueop0, 1);
3589 machine_mode mode0 = GET_MODE (subop0);
3590 machine_mode mode1 = GET_MODE (subop1);
3591 int li = GET_MODE_UNIT_SIZE (mode0);
3592 int l0 = GET_MODE_SIZE (mode0) / li;
3593 int l1 = GET_MODE_SIZE (mode1) / li;
3594 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3595 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3597 bool success = true;
3598 for (int i = 1; i < l0; ++i)
3600 rtx j = XVECEXP (trueop1, 0, i);
3601 if (!CONST_INT_P (j) || INTVAL (j) != i)
3603 success = false;
3604 break;
3607 if (success)
3608 return subop0;
3610 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3612 bool success = true;
3613 for (int i = 1; i < l1; ++i)
3615 rtx j = XVECEXP (trueop1, 0, i);
3616 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3618 success = false;
3619 break;
3622 if (success)
3623 return subop1;
3628 if (XVECLEN (trueop1, 0) == 1
3629 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3630 && GET_CODE (trueop0) == VEC_CONCAT)
3632 rtx vec = trueop0;
3633 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3635 /* Try to find the element in the VEC_CONCAT. */
3636 while (GET_MODE (vec) != mode
3637 && GET_CODE (vec) == VEC_CONCAT)
3639 HOST_WIDE_INT vec_size;
3641 if (CONST_INT_P (XEXP (vec, 0)))
3643 /* vec_concat of two const_ints doesn't make sense with
3644 respect to modes. */
3645 if (CONST_INT_P (XEXP (vec, 1)))
3646 return 0;
3648 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3649 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3651 else
3652 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3654 if (offset < vec_size)
3655 vec = XEXP (vec, 0);
3656 else
3658 offset -= vec_size;
3659 vec = XEXP (vec, 1);
3661 vec = avoid_constant_pool_reference (vec);
3664 if (GET_MODE (vec) == mode)
3665 return vec;
3668 /* If we select elements in a vec_merge that all come from the same
3669 operand, select from that operand directly. */
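/* Illustrative example (made-up operands): selecting (parallel [0 2])
   from (vec_merge a b (const_int 5)) reads only elements whose mask
   bits are set, i.e. only elements of A, so it becomes
   (vec_select a (parallel [0 2])) provided B has no side effects.  */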
3670 if (GET_CODE (op0) == VEC_MERGE)
3672 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3673 if (CONST_INT_P (trueop02))
3675 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3676 bool all_operand0 = true;
3677 bool all_operand1 = true;
3678 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3680 rtx j = XVECEXP (trueop1, 0, i);
3681 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3682 all_operand1 = false;
3683 else
3684 all_operand0 = false;
3686 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3687 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3688 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3689 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3693 /* If we have two nested selects that are inverses of each
3694 other, replace them with the source operand. */
3695 if (GET_CODE (trueop0) == VEC_SELECT
3696 && GET_MODE (XEXP (trueop0, 0)) == mode)
3698 rtx op0_subop1 = XEXP (trueop0, 1);
3699 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3700 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3702 /* Apply the outer ordering vector to the inner one. (The inner
3703 ordering vector is expressly permitted to be of a different
3704 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3705 then the two VEC_SELECTs cancel. */
3706 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3708 rtx x = XVECEXP (trueop1, 0, i);
3709 if (!CONST_INT_P (x))
3710 return 0;
3711 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3712 if (!CONST_INT_P (y) || i != INTVAL (y))
3713 return 0;
3715 return XEXP (trueop0, 0);
3718 return 0;
3719 case VEC_CONCAT:
3721 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3722 ? GET_MODE (trueop0)
3723 : GET_MODE_INNER (mode));
3724 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3725 ? GET_MODE (trueop1)
3726 : GET_MODE_INNER (mode));
3728 gcc_assert (VECTOR_MODE_P (mode));
3729 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3730 == GET_MODE_SIZE (mode));
3732 if (VECTOR_MODE_P (op0_mode))
3733 gcc_assert (GET_MODE_INNER (mode)
3734 == GET_MODE_INNER (op0_mode));
3735 else
3736 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3738 if (VECTOR_MODE_P (op1_mode))
3739 gcc_assert (GET_MODE_INNER (mode)
3740 == GET_MODE_INNER (op1_mode));
3741 else
3742 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3744 if ((GET_CODE (trueop0) == CONST_VECTOR
3745 || CONST_SCALAR_INT_P (trueop0)
3746 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3747 && (GET_CODE (trueop1) == CONST_VECTOR
3748 || CONST_SCALAR_INT_P (trueop1)
3749 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3751 int elt_size = GET_MODE_UNIT_SIZE (mode);
3752 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3753 rtvec v = rtvec_alloc (n_elts);
3754 unsigned int i;
3755 unsigned in_n_elts = 1;
3757 if (VECTOR_MODE_P (op0_mode))
3758 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3759 for (i = 0; i < n_elts; i++)
3761 if (i < in_n_elts)
3763 if (!VECTOR_MODE_P (op0_mode))
3764 RTVEC_ELT (v, i) = trueop0;
3765 else
3766 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3768 else
3770 if (!VECTOR_MODE_P (op1_mode))
3771 RTVEC_ELT (v, i) = trueop1;
3772 else
3773 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3774 i - in_n_elts);
3778 return gen_rtx_CONST_VECTOR (mode, v);
3781 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3782 Restrict the transformation to avoid generating a VEC_SELECT with a
3783 mode unrelated to its operand. */
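/* A sketch of the intent (hypothetical operands): with x of mode MODE,
   (vec_concat (vec_select x (parallel [0 1]))
               (vec_select x (parallel [3 2])))
   becomes (vec_select x (parallel [0 1 3 2])).  */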
3784 if (GET_CODE (trueop0) == VEC_SELECT
3785 && GET_CODE (trueop1) == VEC_SELECT
3786 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3787 && GET_MODE (XEXP (trueop0, 0)) == mode)
3789 rtx par0 = XEXP (trueop0, 1);
3790 rtx par1 = XEXP (trueop1, 1);
3791 int len0 = XVECLEN (par0, 0);
3792 int len1 = XVECLEN (par1, 0);
3793 rtvec vec = rtvec_alloc (len0 + len1);
3794 for (int i = 0; i < len0; i++)
3795 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3796 for (int i = 0; i < len1; i++)
3797 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3798 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3799 gen_rtx_PARALLEL (VOIDmode, vec));
3802 return 0;
3804 default:
3805 gcc_unreachable ();
3808 return 0;
3812 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3813 rtx op0, rtx op1)
3815 unsigned int width = GET_MODE_PRECISION (mode);
3817 if (VECTOR_MODE_P (mode)
3818 && code != VEC_CONCAT
3819 && GET_CODE (op0) == CONST_VECTOR
3820 && GET_CODE (op1) == CONST_VECTOR)
3822 unsigned n_elts = GET_MODE_NUNITS (mode);
3823 machine_mode op0mode = GET_MODE (op0);
3824 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3825 machine_mode op1mode = GET_MODE (op1);
3826 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3827 rtvec v = rtvec_alloc (n_elts);
3828 unsigned int i;
3830 gcc_assert (op0_n_elts == n_elts);
3831 gcc_assert (op1_n_elts == n_elts);
3832 for (i = 0; i < n_elts; i++)
3834 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3835 CONST_VECTOR_ELT (op0, i),
3836 CONST_VECTOR_ELT (op1, i));
3837 if (!x)
3838 return 0;
3839 RTVEC_ELT (v, i) = x;
3842 return gen_rtx_CONST_VECTOR (mode, v);
3845 if (VECTOR_MODE_P (mode)
3846 && code == VEC_CONCAT
3847 && (CONST_SCALAR_INT_P (op0)
3848 || GET_CODE (op0) == CONST_FIXED
3849 || CONST_DOUBLE_AS_FLOAT_P (op0))
3850 && (CONST_SCALAR_INT_P (op1)
3851 || CONST_DOUBLE_AS_FLOAT_P (op1)
3852 || GET_CODE (op1) == CONST_FIXED))
3854 unsigned n_elts = GET_MODE_NUNITS (mode);
3855 rtvec v = rtvec_alloc (n_elts);
3857 gcc_assert (n_elts >= 2);
3858 if (n_elts == 2)
3860 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3861 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3863 RTVEC_ELT (v, 0) = op0;
3864 RTVEC_ELT (v, 1) = op1;
3866 else
3868 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3869 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3870 unsigned i;
3872 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3873 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3874 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3876 for (i = 0; i < op0_n_elts; ++i)
3877 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3878 for (i = 0; i < op1_n_elts; ++i)
3879 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3882 return gen_rtx_CONST_VECTOR (mode, v);
3885 if (SCALAR_FLOAT_MODE_P (mode)
3886 && CONST_DOUBLE_AS_FLOAT_P (op0)
3887 && CONST_DOUBLE_AS_FLOAT_P (op1)
3888 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3890 if (code == AND
3891 || code == IOR
3892 || code == XOR)
3894 long tmp0[4];
3895 long tmp1[4];
3896 REAL_VALUE_TYPE r;
3897 int i;
3899 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3900 GET_MODE (op0));
3901 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3902 GET_MODE (op1));
3903 for (i = 0; i < 4; i++)
3905 switch (code)
3907 case AND:
3908 tmp0[i] &= tmp1[i];
3909 break;
3910 case IOR:
3911 tmp0[i] |= tmp1[i];
3912 break;
3913 case XOR:
3914 tmp0[i] ^= tmp1[i];
3915 break;
3916 default:
3917 gcc_unreachable ();
3920 real_from_target (&r, tmp0, mode);
3921 return const_double_from_real_value (r, mode);
3923 else
3925 REAL_VALUE_TYPE f0, f1, value, result;
3926 const REAL_VALUE_TYPE *opr0, *opr1;
3927 bool inexact;
3929 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3930 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3932 if (HONOR_SNANS (mode)
3933 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3934 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3935 return 0;
3937 real_convert (&f0, mode, opr0);
3938 real_convert (&f1, mode, opr1);
3940 if (code == DIV
3941 && real_equal (&f1, &dconst0)
3942 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3943 return 0;
3945 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3946 && flag_trapping_math
3947 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3949 int s0 = REAL_VALUE_NEGATIVE (f0);
3950 int s1 = REAL_VALUE_NEGATIVE (f1);
3952 switch (code)
3954 case PLUS:
3955 /* Inf + -Inf = NaN plus exception. */
3956 if (s0 != s1)
3957 return 0;
3958 break;
3959 case MINUS:
3960 /* Inf - Inf = NaN plus exception. */
3961 if (s0 == s1)
3962 return 0;
3963 break;
3964 case DIV:
3965 /* Inf / Inf = NaN plus exception. */
3966 return 0;
3967 default:
3968 break;
3972 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3973 && flag_trapping_math
3974 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3975 || (REAL_VALUE_ISINF (f1)
3976 && real_equal (&f0, &dconst0))))
3977 /* Inf * 0 = NaN plus exception. */
3978 return 0;
3980 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3981 &f0, &f1);
3982 real_convert (&result, mode, &value);
3984 /* Don't constant fold this floating point operation if
3985 the result has overflowed and flag_trapping_math is set. */
3987 if (flag_trapping_math
3988 && MODE_HAS_INFINITIES (mode)
3989 && REAL_VALUE_ISINF (result)
3990 && !REAL_VALUE_ISINF (f0)
3991 && !REAL_VALUE_ISINF (f1))
3992 /* Overflow plus exception. */
3993 return 0;
3995 /* Don't constant fold this floating point operation if the
3996 result may depend upon the run-time rounding mode and
3997 flag_rounding_math is set, or if GCC's software emulation
3998 is unable to accurately represent the result. */
4000 if ((flag_rounding_math
4001 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4002 && (inexact || !real_identical (&result, &value)))
4003 return NULL_RTX;
4005 return const_double_from_real_value (result, mode);
4009 /* We can fold some multi-word operations. */
4010 if ((GET_MODE_CLASS (mode) == MODE_INT
4011 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4012 && CONST_SCALAR_INT_P (op0)
4013 && CONST_SCALAR_INT_P (op1))
4015 wide_int result;
4016 bool overflow;
4017 rtx_mode_t pop0 = std::make_pair (op0, mode);
4018 rtx_mode_t pop1 = std::make_pair (op1, mode);
4020 #if TARGET_SUPPORTS_WIDE_INT == 0
4021 /* This assert keeps the simplification from producing a result
4022 that cannot be represented in a CONST_DOUBLE, but a lot of
4023 upstream callers expect that this function never fails to
4024 simplify something, so if you added this to the test
4025 above, the code would die later anyway. If this assert
4026 triggers, you just need to make the port support wide int. */
4027 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4028 #endif
4029 switch (code)
4031 case MINUS:
4032 result = wi::sub (pop0, pop1);
4033 break;
4035 case PLUS:
4036 result = wi::add (pop0, pop1);
4037 break;
4039 case MULT:
4040 result = wi::mul (pop0, pop1);
4041 break;
4043 case DIV:
4044 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4045 if (overflow)
4046 return NULL_RTX;
4047 break;
4049 case MOD:
4050 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4051 if (overflow)
4052 return NULL_RTX;
4053 break;
4055 case UDIV:
4056 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4057 if (overflow)
4058 return NULL_RTX;
4059 break;
4061 case UMOD:
4062 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4063 if (overflow)
4064 return NULL_RTX;
4065 break;
4067 case AND:
4068 result = wi::bit_and (pop0, pop1);
4069 break;
4071 case IOR:
4072 result = wi::bit_or (pop0, pop1);
4073 break;
4075 case XOR:
4076 result = wi::bit_xor (pop0, pop1);
4077 break;
4079 case SMIN:
4080 result = wi::smin (pop0, pop1);
4081 break;
4083 case SMAX:
4084 result = wi::smax (pop0, pop1);
4085 break;
4087 case UMIN:
4088 result = wi::umin (pop0, pop1);
4089 break;
4091 case UMAX:
4092 result = wi::umax (pop0, pop1);
4093 break;
4095 case LSHIFTRT:
4096 case ASHIFTRT:
4097 case ASHIFT:
4099 wide_int wop1 = pop1;
4100 if (SHIFT_COUNT_TRUNCATED)
4101 wop1 = wi::umod_trunc (wop1, width);
4102 else if (wi::geu_p (wop1, width))
4103 return NULL_RTX;
4105 switch (code)
4107 case LSHIFTRT:
4108 result = wi::lrshift (pop0, wop1);
4109 break;
4111 case ASHIFTRT:
4112 result = wi::arshift (pop0, wop1);
4113 break;
4115 case ASHIFT:
4116 result = wi::lshift (pop0, wop1);
4117 break;
4119 default:
4120 gcc_unreachable ();
4122 break;
4124 case ROTATE:
4125 case ROTATERT:
4127 if (wi::neg_p (pop1))
4128 return NULL_RTX;
4130 switch (code)
4132 case ROTATE:
4133 result = wi::lrotate (pop0, pop1);
4134 break;
4136 case ROTATERT:
4137 result = wi::rrotate (pop0, pop1);
4138 break;
4140 default:
4141 gcc_unreachable ();
4143 break;
4145 default:
4146 return NULL_RTX;
4148 return immed_wide_int_const (result, mode);
4151 return NULL_RTX;
4156 /* Return a positive integer if X should sort after Y. The value
4157 returned is 1 if and only if X and Y are both regs. */
4159 static int
4160 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4162 int result;
4164 result = (commutative_operand_precedence (y)
4165 - commutative_operand_precedence (x));
4166 if (result)
4167 return result + result;
4169 /* Group together equal REGs to do more simplification. */
4170 if (REG_P (x) && REG_P (y))
4171 return REGNO (x) > REGNO (y);
4173 return 0;
4176 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4177 operands may be another PLUS or MINUS.
4179 Rather than test for specific cases, we do this by a brute-force method
4180 and do all possible simplifications until no more changes occur. Then
4181 we rebuild the operation.
4183 May return NULL_RTX when no changes were made. */
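/* A purely illustrative walk-through (symbolic operands): for
   (minus (plus a b) (neg c)) the ops array is expanded to roughly
   { +a, +b, +c }, pairs are simplified where possible, and the
   expression is rebuilt as (plus (plus a b) c) -- or NULL_RTX is
   returned if nothing was canonicalized.  */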
4185 static rtx
4186 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4187 rtx op1)
4189 struct simplify_plus_minus_op_data
4191 rtx op;
4192 short neg;
4193 } ops[16];
4194 rtx result, tem;
4195 int n_ops = 2;
4196 int changed, n_constants, canonicalized = 0;
4197 int i, j;
4199 memset (ops, 0, sizeof ops);
4201 /* Set up the two operands and then expand them until nothing has been
4202 changed. If we run out of room in our array, give up; this should
4203 almost never happen. */
4205 ops[0].op = op0;
4206 ops[0].neg = 0;
4207 ops[1].op = op1;
4208 ops[1].neg = (code == MINUS);
4212 changed = 0;
4213 n_constants = 0;
4215 for (i = 0; i < n_ops; i++)
4217 rtx this_op = ops[i].op;
4218 int this_neg = ops[i].neg;
4219 enum rtx_code this_code = GET_CODE (this_op);
4221 switch (this_code)
4223 case PLUS:
4224 case MINUS:
4225 if (n_ops == ARRAY_SIZE (ops))
4226 return NULL_RTX;
4228 ops[n_ops].op = XEXP (this_op, 1);
4229 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4230 n_ops++;
4232 ops[i].op = XEXP (this_op, 0);
4233 changed = 1;
4234 /* If this operand was negated then we will potentially
4235 canonicalize the expression. Similarly if we don't
4236 place the operands adjacent we're re-ordering the
4237 expression and thus might be performing a
4238 canonicalization. Ignore register re-ordering.
4239 ??? It might be better to shuffle the ops array here,
4240 but then (plus (plus (A, B), plus (C, D))) wouldn't
4241 be seen as non-canonical. */
4242 if (this_neg
4243 || (i != n_ops - 2
4244 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4245 canonicalized = 1;
4246 break;
4248 case NEG:
4249 ops[i].op = XEXP (this_op, 0);
4250 ops[i].neg = ! this_neg;
4251 changed = 1;
4252 canonicalized = 1;
4253 break;
4255 case CONST:
4256 if (n_ops != ARRAY_SIZE (ops)
4257 && GET_CODE (XEXP (this_op, 0)) == PLUS
4258 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4259 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4261 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4262 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4263 ops[n_ops].neg = this_neg;
4264 n_ops++;
4265 changed = 1;
4266 canonicalized = 1;
4268 break;
4270 case NOT:
4271 /* ~a -> (-a - 1) */
4272 if (n_ops != ARRAY_SIZE (ops))
4274 ops[n_ops].op = CONSTM1_RTX (mode);
4275 ops[n_ops++].neg = this_neg;
4276 ops[i].op = XEXP (this_op, 0);
4277 ops[i].neg = !this_neg;
4278 changed = 1;
4279 canonicalized = 1;
4281 break;
4283 case CONST_INT:
4284 n_constants++;
4285 if (this_neg)
4287 ops[i].op = neg_const_int (mode, this_op);
4288 ops[i].neg = 0;
4289 changed = 1;
4290 canonicalized = 1;
4292 break;
4294 default:
4295 break;
4299 while (changed);
4301 if (n_constants > 1)
4302 canonicalized = 1;
4304 gcc_assert (n_ops >= 2);
4306 /* If we only have two operands, we can avoid the loops. */
4307 if (n_ops == 2)
4309 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4310 rtx lhs, rhs;
4312 /* Get the two operands. Be careful with the order, especially for
4313 the cases where code == MINUS. */
4314 if (ops[0].neg && ops[1].neg)
4316 lhs = gen_rtx_NEG (mode, ops[0].op);
4317 rhs = ops[1].op;
4319 else if (ops[0].neg)
4321 lhs = ops[1].op;
4322 rhs = ops[0].op;
4324 else
4326 lhs = ops[0].op;
4327 rhs = ops[1].op;
4330 return simplify_const_binary_operation (code, mode, lhs, rhs);
4333 /* Now simplify each pair of operands until nothing changes. */
4334 while (1)
4336 /* Insertion sort is good enough for a small array. */
4337 for (i = 1; i < n_ops; i++)
4339 struct simplify_plus_minus_op_data save;
4340 int cmp;
4342 j = i - 1;
4343 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4344 if (cmp <= 0)
4345 continue;
4346 /* Just swapping registers doesn't count as canonicalization. */
4347 if (cmp != 1)
4348 canonicalized = 1;
4350 save = ops[i];
4352 ops[j + 1] = ops[j];
4353 while (j--
4354 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4355 ops[j + 1] = save;
4358 changed = 0;
4359 for (i = n_ops - 1; i > 0; i--)
4360 for (j = i - 1; j >= 0; j--)
4362 rtx lhs = ops[j].op, rhs = ops[i].op;
4363 int lneg = ops[j].neg, rneg = ops[i].neg;
4365 if (lhs != 0 && rhs != 0)
4367 enum rtx_code ncode = PLUS;
4369 if (lneg != rneg)
4371 ncode = MINUS;
4372 if (lneg)
4373 std::swap (lhs, rhs);
4375 else if (swap_commutative_operands_p (lhs, rhs))
4376 std::swap (lhs, rhs);
4378 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4379 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4381 rtx tem_lhs, tem_rhs;
4383 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4384 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4385 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4386 tem_rhs);
4388 if (tem && !CONSTANT_P (tem))
4389 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4391 else
4392 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4394 if (tem)
4396 /* Reject "simplifications" that just wrap the two
4397 arguments in a CONST. Failure to do so can result
4398 in infinite recursion with simplify_binary_operation
4399 when it calls us to simplify CONST operations.
4400 Also, if we find such a simplification, don't try
4401 any more combinations with this rhs: We must have
4402 something like symbol+offset, i.e. one of the
4403 trivial CONST expressions we handle later. */
4404 if (GET_CODE (tem) == CONST
4405 && GET_CODE (XEXP (tem, 0)) == ncode
4406 && XEXP (XEXP (tem, 0), 0) == lhs
4407 && XEXP (XEXP (tem, 0), 1) == rhs)
4408 break;
4409 lneg &= rneg;
4410 if (GET_CODE (tem) == NEG)
4411 tem = XEXP (tem, 0), lneg = !lneg;
4412 if (CONST_INT_P (tem) && lneg)
4413 tem = neg_const_int (mode, tem), lneg = 0;
4415 ops[i].op = tem;
4416 ops[i].neg = lneg;
4417 ops[j].op = NULL_RTX;
4418 changed = 1;
4419 canonicalized = 1;
4424 if (!changed)
4425 break;
4427 /* Pack all the operands to the lower-numbered entries. */
4428 for (i = 0, j = 0; j < n_ops; j++)
4429 if (ops[j].op)
4431 ops[i] = ops[j];
4432 i++;
4434 n_ops = i;
4437 /* If nothing changed, check that rematerialization of rtl instructions
4438 is still required. */
4439 if (!canonicalized)
4441 /* Perform rematerialization only if all operands are registers and
4442 all operations are PLUS. */
4443 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4444 around rs6000 and how it uses the CA register. See PR67145. */
4445 for (i = 0; i < n_ops; i++)
4446 if (ops[i].neg
4447 || !REG_P (ops[i].op)
4448 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4449 && fixed_regs[REGNO (ops[i].op)]
4450 && !global_regs[REGNO (ops[i].op)]
4451 && ops[i].op != frame_pointer_rtx
4452 && ops[i].op != arg_pointer_rtx
4453 && ops[i].op != stack_pointer_rtx))
4454 return NULL_RTX;
4455 goto gen_result;
4458 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4459 if (n_ops == 2
4460 && CONST_INT_P (ops[1].op)
4461 && CONSTANT_P (ops[0].op)
4462 && ops[0].neg)
4463 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4465 /* We suppressed creation of trivial CONST expressions in the
4466 combination loop to avoid recursion. Create one manually now.
4467 The combination loop should have ensured that there is exactly
4468 one CONST_INT, and the sort will have ensured that it is last
4469 in the array and that any other constant will be next-to-last. */
4471 if (n_ops > 1
4472 && CONST_INT_P (ops[n_ops - 1].op)
4473 && CONSTANT_P (ops[n_ops - 2].op))
4475 rtx value = ops[n_ops - 1].op;
4476 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4477 value = neg_const_int (mode, value);
4478 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4479 INTVAL (value));
4480 n_ops--;
4483 /* Put a non-negated operand first, if possible. */
4485 for (i = 0; i < n_ops && ops[i].neg; i++)
4486 continue;
4487 if (i == n_ops)
4488 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4489 else if (i != 0)
4491 tem = ops[0].op;
4492 ops[0] = ops[i];
4493 ops[i].op = tem;
4494 ops[i].neg = 1;
4497 /* Now make the result by performing the requested operations. */
4498 gen_result:
4499 result = ops[0].op;
4500 for (i = 1; i < n_ops; i++)
4501 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4502 mode, result, ops[i].op);
4504 return result;
4507 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4508 static bool
4509 plus_minus_operand_p (const_rtx x)
4511 return GET_CODE (x) == PLUS
4512 || GET_CODE (x) == MINUS
4513 || (GET_CODE (x) == CONST
4514 && GET_CODE (XEXP (x, 0)) == PLUS
4515 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4516 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4519 /* Like simplify_binary_operation except used for relational operators.
4520 MODE is the mode of the result. If MODE is VOIDmode, the operands
4521 must not both also be VOIDmode.
4523 CMP_MODE specifies the mode in which the comparison is done, so it is
4524 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4525 the operands or, if both are VOIDmode, the operands are compared in
4526 "infinite precision". */
4528 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4529 machine_mode cmp_mode, rtx op0, rtx op1)
4531 rtx tem, trueop0, trueop1;
4533 if (cmp_mode == VOIDmode)
4534 cmp_mode = GET_MODE (op0);
4535 if (cmp_mode == VOIDmode)
4536 cmp_mode = GET_MODE (op1);
4538 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4539 if (tem)
4541 if (SCALAR_FLOAT_MODE_P (mode))
4543 if (tem == const0_rtx)
4544 return CONST0_RTX (mode);
4545 #ifdef FLOAT_STORE_FLAG_VALUE
4547 REAL_VALUE_TYPE val;
4548 val = FLOAT_STORE_FLAG_VALUE (mode);
4549 return const_double_from_real_value (val, mode);
4551 #else
4552 return NULL_RTX;
4553 #endif
4555 if (VECTOR_MODE_P (mode))
4557 if (tem == const0_rtx)
4558 return CONST0_RTX (mode);
4559 #ifdef VECTOR_STORE_FLAG_VALUE
4561 int i, units;
4562 rtvec v;
4564 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4565 if (val == NULL_RTX)
4566 return NULL_RTX;
4567 if (val == const1_rtx)
4568 return CONST1_RTX (mode);
4570 units = GET_MODE_NUNITS (mode);
4571 v = rtvec_alloc (units);
4572 for (i = 0; i < units; i++)
4573 RTVEC_ELT (v, i) = val;
4574 return gen_rtx_raw_CONST_VECTOR (mode, v);
4576 #else
4577 return NULL_RTX;
4578 #endif
4581 return tem;
4584 /* For the following tests, ensure const0_rtx is op1. */
4585 if (swap_commutative_operands_p (op0, op1)
4586 || (op0 == const0_rtx && op1 != const0_rtx))
4587 std::swap (op0, op1), code = swap_condition (code);
4589 /* If op0 is a compare, extract the comparison arguments from it. */
4590 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4591 return simplify_gen_relational (code, mode, VOIDmode,
4592 XEXP (op0, 0), XEXP (op0, 1));
4594 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4595 || CC0_P (op0))
4596 return NULL_RTX;
4598 trueop0 = avoid_constant_pool_reference (op0);
4599 trueop1 = avoid_constant_pool_reference (op1);
4600 return simplify_relational_operation_1 (code, mode, cmp_mode,
4601 trueop0, trueop1);
4604 /* This part of simplify_relational_operation is only used when CMP_MODE
4605 is not in class MODE_CC (i.e. it is a real comparison).
4607 MODE is the mode of the result, while CMP_MODE specifies the mode
4608 in which the comparison is done, so it is the mode of the operands. */
4610 static rtx
4611 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4612 machine_mode cmp_mode, rtx op0, rtx op1)
4614 enum rtx_code op0code = GET_CODE (op0);
4616 if (op1 == const0_rtx && COMPARISON_P (op0))
4618 /* If op0 is a comparison, extract the comparison arguments
4619 from it. */
4620 if (code == NE)
4622 if (GET_MODE (op0) == mode)
4623 return simplify_rtx (op0);
4624 else
4625 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4626 XEXP (op0, 0), XEXP (op0, 1));
4628 else if (code == EQ)
4630 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4631 if (new_code != UNKNOWN)
4632 return simplify_gen_relational (new_code, mode, VOIDmode,
4633 XEXP (op0, 0), XEXP (op0, 1));
4637 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4638 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4639 if ((code == LTU || code == GEU)
4640 && GET_CODE (op0) == PLUS
4641 && CONST_INT_P (XEXP (op0, 1))
4642 && (rtx_equal_p (op1, XEXP (op0, 0))
4643 || rtx_equal_p (op1, XEXP (op0, 1)))
4644 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4645 && XEXP (op0, 1) != const0_rtx)
4647 rtx new_cmp
4648 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4649 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4650 cmp_mode, XEXP (op0, 0), new_cmp);
4653 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4654 transformed into (LTU a -C). */
4655 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4656 && CONST_INT_P (XEXP (op0, 1))
4657 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4658 && XEXP (op0, 1) != const0_rtx)
4660 rtx new_cmp
4661 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4662 return simplify_gen_relational (LTU, mode, cmp_mode,
4663 XEXP (op0, 0), new_cmp);
4666 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4667 if ((code == LTU || code == GEU)
4668 && GET_CODE (op0) == PLUS
4669 && rtx_equal_p (op1, XEXP (op0, 1))
4670 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4671 && !rtx_equal_p (op1, XEXP (op0, 0)))
4672 return simplify_gen_relational (code, mode, cmp_mode, op0,
4673 copy_rtx (XEXP (op0, 0)));
4675 if (op1 == const0_rtx)
4677 /* Canonicalize (GTU x 0) as (NE x 0). */
4678 if (code == GTU)
4679 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4680 /* Canonicalize (LEU x 0) as (EQ x 0). */
4681 if (code == LEU)
4682 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4684 else if (op1 == const1_rtx)
4686 switch (code)
4688 case GE:
4689 /* Canonicalize (GE x 1) as (GT x 0). */
4690 return simplify_gen_relational (GT, mode, cmp_mode,
4691 op0, const0_rtx);
4692 case GEU:
4693 /* Canonicalize (GEU x 1) as (NE x 0). */
4694 return simplify_gen_relational (NE, mode, cmp_mode,
4695 op0, const0_rtx);
4696 case LT:
4697 /* Canonicalize (LT x 1) as (LE x 0). */
4698 return simplify_gen_relational (LE, mode, cmp_mode,
4699 op0, const0_rtx);
4700 case LTU:
4701 /* Canonicalize (LTU x 1) as (EQ x 0). */
4702 return simplify_gen_relational (EQ, mode, cmp_mode,
4703 op0, const0_rtx);
4704 default:
4705 break;
4708 else if (op1 == constm1_rtx)
4710 /* Canonicalize (LE x -1) as (LT x 0). */
4711 if (code == LE)
4712 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4713 /* Canonicalize (GT x -1) as (GE x 0). */
4714 if (code == GT)
4715 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4718 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4719 if ((code == EQ || code == NE)
4720 && (op0code == PLUS || op0code == MINUS)
4721 && CONSTANT_P (op1)
4722 && CONSTANT_P (XEXP (op0, 1))
4723 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4725 rtx x = XEXP (op0, 0);
4726 rtx c = XEXP (op0, 1);
4727 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4728 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4730 /* Detect an infinite recursion condition, where this simplification
4731 would oscillate between:
4732 A + B == C <---> C - B == A,
4733 where A, B, and C are all non-simplifiable constant expressions,
4734 usually SYMBOL_REFs. */
4735 if (GET_CODE (tem) == invcode
4736 && CONSTANT_P (x)
4737 && rtx_equal_p (c, XEXP (tem, 1)))
4738 return NULL_RTX;
4740 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4743 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4744 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4745 if (code == NE
4746 && op1 == const0_rtx
4747 && GET_MODE_CLASS (mode) == MODE_INT
4748 && cmp_mode != VOIDmode
4749 /* ??? Work-around BImode bugs in the ia64 backend. */
4750 && mode != BImode
4751 && cmp_mode != BImode
4752 && nonzero_bits (op0, cmp_mode) == 1
4753 && STORE_FLAG_VALUE == 1)
4754 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4755 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4756 : lowpart_subreg (mode, op0, cmp_mode);
4758 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4759 if ((code == EQ || code == NE)
4760 && op1 == const0_rtx
4761 && op0code == XOR)
4762 return simplify_gen_relational (code, mode, cmp_mode,
4763 XEXP (op0, 0), XEXP (op0, 1));
4765 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4766 if ((code == EQ || code == NE)
4767 && op0code == XOR
4768 && rtx_equal_p (XEXP (op0, 0), op1)
4769 && !side_effects_p (XEXP (op0, 0)))
4770 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4771 CONST0_RTX (mode));
4773 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4774 if ((code == EQ || code == NE)
4775 && op0code == XOR
4776 && rtx_equal_p (XEXP (op0, 1), op1)
4777 && !side_effects_p (XEXP (op0, 1)))
4778 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4779 CONST0_RTX (mode));
4781 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4782 if ((code == EQ || code == NE)
4783 && op0code == XOR
4784 && CONST_SCALAR_INT_P (op1)
4785 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4786 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4787 simplify_gen_binary (XOR, cmp_mode,
4788 XEXP (op0, 1), op1));
4790 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4791 can be implemented with a BICS instruction on some targets, or
4792 constant-folded if y is a constant. */
4793 if ((code == EQ || code == NE)
4794 && op0code == AND
4795 && rtx_equal_p (XEXP (op0, 0), op1)
4796 && !side_effects_p (op1)
4797 && op1 != CONST0_RTX (cmp_mode))
4799 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4800 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4802 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4803 CONST0_RTX (cmp_mode));
4806 /* Likewise for (eq/ne (and x y) y). */
4807 if ((code == EQ || code == NE)
4808 && op0code == AND
4809 && rtx_equal_p (XEXP (op0, 1), op1)
4810 && !side_effects_p (op1)
4811 && op1 != CONST0_RTX (cmp_mode))
4813 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4814 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4816 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4817 CONST0_RTX (cmp_mode));
4820 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4821 if ((code == EQ || code == NE)
4822 && GET_CODE (op0) == BSWAP
4823 && CONST_SCALAR_INT_P (op1))
4824 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4825 simplify_gen_unary (BSWAP, cmp_mode,
4826 op1, cmp_mode));
4828 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4829 if ((code == EQ || code == NE)
4830 && GET_CODE (op0) == BSWAP
4831 && GET_CODE (op1) == BSWAP)
4832 return simplify_gen_relational (code, mode, cmp_mode,
4833 XEXP (op0, 0), XEXP (op1, 0));
4835 if (op0code == POPCOUNT && op1 == const0_rtx)
4836 switch (code)
4838 case EQ:
4839 case LE:
4840 case LEU:
4841 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4842 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4843 XEXP (op0, 0), const0_rtx);
4845 case NE:
4846 case GT:
4847 case GTU:
4848 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4849 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4850 XEXP (op0, 0), const0_rtx);
4852 default:
4853 break;
4856 return NULL_RTX;
4859 enum
4861 CMP_EQ = 1,
4862 CMP_LT = 2,
4863 CMP_GT = 4,
4864 CMP_LTU = 8,
4865 CMP_GTU = 16
4869 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4870 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4871 For KNOWN_RESULT to make sense, it should be either CMP_EQ, or the
4872 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4873 For floating-point comparisons, assume that the operands were ordered. */
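/* For instance: if KNOWN_RESULT is CMP_LT | CMP_GTU (signed less,
   unsigned greater), then code GE yields const0_rtx while GEU yields
   const_true_rtx.  */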
4875 static rtx
4876 comparison_result (enum rtx_code code, int known_results)
4878 switch (code)
4880 case EQ:
4881 case UNEQ:
4882 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4883 case NE:
4884 case LTGT:
4885 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4887 case LT:
4888 case UNLT:
4889 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4890 case GE:
4891 case UNGE:
4892 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4894 case GT:
4895 case UNGT:
4896 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4897 case LE:
4898 case UNLE:
4899 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4901 case LTU:
4902 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4903 case GEU:
4904 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4906 case GTU:
4907 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4908 case LEU:
4909 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4911 case ORDERED:
4912 return const_true_rtx;
4913 case UNORDERED:
4914 return const0_rtx;
4915 default:
4916 gcc_unreachable ();
4920 /* Check if the given comparison (done in the given MODE) is actually
4921 a tautology or a contradiction. If the mode is VOIDmode, the
4922 comparison is done in "infinite precision". If no simplification
4923 is possible, this function returns zero. Otherwise, it returns
4924 either const_true_rtx or const0_rtx. */
4927 simplify_const_relational_operation (enum rtx_code code,
4928 machine_mode mode,
4929 rtx op0, rtx op1)
4931 rtx tem;
4932 rtx trueop0;
4933 rtx trueop1;
4935 gcc_assert (mode != VOIDmode
4936 || (GET_MODE (op0) == VOIDmode
4937 && GET_MODE (op1) == VOIDmode));
4939 /* If op0 is a compare, extract the comparison arguments from it. */
4940 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4942 op1 = XEXP (op0, 1);
4943 op0 = XEXP (op0, 0);
4945 if (GET_MODE (op0) != VOIDmode)
4946 mode = GET_MODE (op0);
4947 else if (GET_MODE (op1) != VOIDmode)
4948 mode = GET_MODE (op1);
4949 else
4950 return 0;
4953 /* We can't simplify MODE_CC values since we don't know what the
4954 actual comparison is. */
4955 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4956 return 0;
4958 /* Make sure the constant is second. */
4959 if (swap_commutative_operands_p (op0, op1))
4961 std::swap (op0, op1);
4962 code = swap_condition (code);
4965 trueop0 = avoid_constant_pool_reference (op0);
4966 trueop1 = avoid_constant_pool_reference (op1);
4968 /* For integer comparisons of A and B maybe we can simplify A - B and can
4969 then simplify a comparison of that with zero. If A and B are both either
4970 a register or a CONST_INT, this can't help; testing for these cases will
4971 prevent infinite recursion here and speed things up.
4973 We can only do this for EQ and NE comparisons; otherwise we may
4974 lose or introduce overflow, which we cannot disregard as undefined
4975 because we do not know the signedness of the operation on either the
4976 left or the right hand side of the comparison. */
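/* As an illustration (symbolic operands): for (eq (plus x 4) x) the
   difference simplifies to (const_int 4), assuming the subtraction
   folds, and comparing that against zero yields const0_rtx.  */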
4978 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4979 && (code == EQ || code == NE)
4980 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4981 && (REG_P (op1) || CONST_INT_P (trueop1)))
4982 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4983 /* We cannot do this if tem is a nonzero address. */
4984 && ! nonzero_address_p (tem))
4985 return simplify_const_relational_operation (signed_condition (code),
4986 mode, tem, const0_rtx);
4988 if (! HONOR_NANS (mode) && code == ORDERED)
4989 return const_true_rtx;
4991 if (! HONOR_NANS (mode) && code == UNORDERED)
4992 return const0_rtx;
4994 /* For modes without NaNs, if the two operands are equal, we know the
4995 result except if they have side-effects. Even with NaNs we know
4996 the result of unordered comparisons and, if signaling NaNs are
4997 irrelevant, also the result of LT/GT/LTGT. */
4998 if ((! HONOR_NANS (trueop0)
4999 || code == UNEQ || code == UNLE || code == UNGE
5000 || ((code == LT || code == GT || code == LTGT)
5001 && ! HONOR_SNANS (trueop0)))
5002 && rtx_equal_p (trueop0, trueop1)
5003 && ! side_effects_p (trueop0))
5004 return comparison_result (code, CMP_EQ);
5006 /* If the operands are floating-point constants, see if we can fold
5007 the result. */
5008 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5009 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5010 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5012 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5013 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5015 /* Comparisons are unordered iff at least one of the values is NaN. */
5016 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5017 switch (code)
5019 case UNEQ:
5020 case UNLT:
5021 case UNGT:
5022 case UNLE:
5023 case UNGE:
5024 case NE:
5025 case UNORDERED:
5026 return const_true_rtx;
5027 case EQ:
5028 case LT:
5029 case GT:
5030 case LE:
5031 case GE:
5032 case LTGT:
5033 case ORDERED:
5034 return const0_rtx;
5035 default:
5036 return 0;
5039 return comparison_result (code,
5040 (real_equal (d0, d1) ? CMP_EQ :
5041 real_less (d0, d1) ? CMP_LT : CMP_GT));
5044 /* Otherwise, see if the operands are both integers. */
5045 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5046 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5048 /* It would be nice if we really had a mode here. However, the
5049 largest int representable on the target is as good as
5050 infinite. */
5051 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5052 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
5053 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
5055 if (wi::eq_p (ptrueop0, ptrueop1))
5056 return comparison_result (code, CMP_EQ);
5057 else
5059 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5060 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5061 return comparison_result (code, cr);
5065 /* Optimize comparisons with upper and lower bounds. */
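/* E.g. (assuming nonzero_bits shows only the low 8 bits of X can be
   set): MMIN/MMAX become 0/255, so (gtu X (const_int 255)) folds to
   const0_rtx and (leu X (const_int 255)) to const_true_rtx.  */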
5066 if (HWI_COMPUTABLE_MODE_P (mode)
5067 && CONST_INT_P (trueop1)
5068 && !side_effects_p (trueop0))
5070 int sign;
5071 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5072 HOST_WIDE_INT val = INTVAL (trueop1);
5073 HOST_WIDE_INT mmin, mmax;
5075 if (code == GEU
5076 || code == LEU
5077 || code == GTU
5078 || code == LTU)
5079 sign = 0;
5080 else
5081 sign = 1;
5083 /* Get a reduced range if the sign bit is zero. */
5084 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5086 mmin = 0;
5087 mmax = nonzero;
5089 else
5091 rtx mmin_rtx, mmax_rtx;
5092 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5094 mmin = INTVAL (mmin_rtx);
5095 mmax = INTVAL (mmax_rtx);
5096 if (sign)
5098 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5100 mmin >>= (sign_copies - 1);
5101 mmax >>= (sign_copies - 1);
5105 switch (code)
5107 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5108 case GEU:
5109 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5110 return const_true_rtx;
5111 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5112 return const0_rtx;
5113 break;
5114 case GE:
5115 if (val <= mmin)
5116 return const_true_rtx;
5117 if (val > mmax)
5118 return const0_rtx;
5119 break;
5121 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5122 case LEU:
5123 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5124 return const_true_rtx;
5125 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5126 return const0_rtx;
5127 break;
5128 case LE:
5129 if (val >= mmax)
5130 return const_true_rtx;
5131 if (val < mmin)
5132 return const0_rtx;
5133 break;
5135 case EQ:
5136 /* x == y is always false for y out of range. */
5137 if (val < mmin || val > mmax)
5138 return const0_rtx;
5139 break;
5141 /* x > y is always false for y >= mmax, always true for y < mmin. */
5142 case GTU:
5143 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5144 return const0_rtx;
5145 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5146 return const_true_rtx;
5147 break;
5148 case GT:
5149 if (val >= mmax)
5150 return const0_rtx;
5151 if (val < mmin)
5152 return const_true_rtx;
5153 break;
5155 /* x < y is always false for y <= mmin, always true for y > mmax. */
5156 case LTU:
5157 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5158 return const0_rtx;
5159 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5160 return const_true_rtx;
5161 break;
5162 case LT:
5163 if (val <= mmin)
5164 return const0_rtx;
5165 if (val > mmax)
5166 return const_true_rtx;
5167 break;
5169 case NE:
5170 /* x != y is always true for y out of range. */
5171 if (val < mmin || val > mmax)
5172 return const_true_rtx;
5173 break;
5175 default:
5176 break;
5180 /* Optimize integer comparisons with zero. */
5181 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5183 /* Some addresses are known to be nonzero. We don't know
5184 their sign, but equality comparisons are known. */
5185 if (nonzero_address_p (trueop0))
5187 if (code == EQ || code == LEU)
5188 return const0_rtx;
5189 if (code == NE || code == GTU)
5190 return const_true_rtx;
5193 /* See if the first operand is an IOR with a constant. If so, we
5194 may be able to determine the result of this comparison. */
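/* For example: (ne (ior x (const_int 4)) (const_int 0)) must be
   true, since the IOR keeps bit 2 set regardless of X.  */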
5195 if (GET_CODE (op0) == IOR)
5197 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5198 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5200 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5201 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5202 && (UINTVAL (inner_const)
5203 & (HOST_WIDE_INT_1U
5204 << sign_bitnum)));
5206 switch (code)
5208 case EQ:
5209 case LEU:
5210 return const0_rtx;
5211 case NE:
5212 case GTU:
5213 return const_true_rtx;
5214 case LT:
5215 case LE:
5216 if (has_sign)
5217 return const_true_rtx;
5218 break;
5219 case GT:
5220 case GE:
5221 if (has_sign)
5222 return const0_rtx;
5223 break;
5224 default:
5225 break;
5231 /* Optimize comparison of ABS with zero. */
5232 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5233 && (GET_CODE (trueop0) == ABS
5234 || (GET_CODE (trueop0) == FLOAT_EXTEND
5235 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5237 switch (code)
5239 case LT:
5240 /* Optimize abs(x) < 0.0. */
5241 if (!HONOR_SNANS (mode)
5242 && (!INTEGRAL_MODE_P (mode)
5243 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5245 if (INTEGRAL_MODE_P (mode)
5246 && (issue_strict_overflow_warning
5247 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5248 warning (OPT_Wstrict_overflow,
5249 ("assuming signed overflow does not occur when "
5250 "assuming abs (x) < 0 is false"));
5251 return const0_rtx;
5253 break;
5255 case GE:
5256 /* Optimize abs(x) >= 0.0. */
5257 if (!HONOR_NANS (mode)
5258 && (!INTEGRAL_MODE_P (mode)
5259 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5261 if (INTEGRAL_MODE_P (mode)
5262 && (issue_strict_overflow_warning
5263 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5264 warning (OPT_Wstrict_overflow,
5265 ("assuming signed overflow does not occur when "
5266 "assuming abs (x) >= 0 is true"));
5267 return const_true_rtx;
5269 break;
5271 case UNGE:
5272 /* Optimize ! (abs(x) < 0.0). */
5273 return const_true_rtx;
5275 default:
5276 break;
5280 return 0;
5283 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5284 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5285 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5286 expression can be simplified to that, or NULL_RTX if not.
5287 Assume X is compared against zero with CMP_CODE and the true
5288 arm is TRUE_VAL and the false arm is FALSE_VAL. */
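/* A concrete, target-dependent example: on a target where
   CLZ_DEFINED_VALUE_AT_ZERO stores 32 for SImode,
   (x == 0) ? 32 : (clz:SI x) collapses to (clz:SI x).  */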
5290 static rtx
5291 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5293 if (cmp_code != EQ && cmp_code != NE)
5294 return NULL_RTX;
5296 /* Result on X == 0 and X !=0 respectively. */
5297 rtx on_zero, on_nonzero;
5298 if (cmp_code == EQ)
5300 on_zero = true_val;
5301 on_nonzero = false_val;
5303 else
5305 on_zero = false_val;
5306 on_nonzero = true_val;
5309 rtx_code op_code = GET_CODE (on_nonzero);
5310 if ((op_code != CLZ && op_code != CTZ)
5311 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5312 || !CONST_INT_P (on_zero))
5313 return NULL_RTX;
5315 HOST_WIDE_INT op_val;
5316 if (((op_code == CLZ
5317 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5318 || (op_code == CTZ
5319 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5320 && op_val == INTVAL (on_zero))
5321 return on_nonzero;
5323 return NULL_RTX;
5327 /* Simplify CODE, an operation with result mode MODE and three operands,
5328 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5329 a constant. Return 0 if no simplification is possible. */
5332 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5333 machine_mode op0_mode, rtx op0, rtx op1,
5334 rtx op2)
5336 unsigned int width = GET_MODE_PRECISION (mode);
5337 bool any_change = false;
5338 rtx tem, trueop2;
5340 /* VOIDmode means "infinite" precision. */
5341 if (width == 0)
5342 width = HOST_BITS_PER_WIDE_INT;
5344 switch (code)
5346 case FMA:
5347 /* Simplify negations around the multiplication. */
5348 /* -a * -b + c => a * b + c. */
5349 if (GET_CODE (op0) == NEG)
5351 tem = simplify_unary_operation (NEG, mode, op1, mode);
5352 if (tem)
5353 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5355 else if (GET_CODE (op1) == NEG)
5357 tem = simplify_unary_operation (NEG, mode, op0, mode);
5358 if (tem)
5359 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5362 /* Canonicalize the two multiplication operands. */
5363 /* a * -b + c => -b * a + c. */
5364 if (swap_commutative_operands_p (op0, op1))
5365 std::swap (op0, op1), any_change = true;
5367 if (any_change)
5368 return gen_rtx_FMA (mode, op0, op1, op2);
5369 return NULL_RTX;
5371 case SIGN_EXTRACT:
5372 case ZERO_EXTRACT:
5373 if (CONST_INT_P (op0)
5374 && CONST_INT_P (op1)
5375 && CONST_INT_P (op2)
5376 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5377 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5379 /* Extracting a bit-field from a constant */
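/* E.g. (with !BITS_BIG_ENDIAN): (zero_extract (const_int 0xf0)
   (const_int 4) (const_int 4)) shifts right by 4 and masks to 4 bits,
   giving (const_int 0xf); SIGN_EXTRACT would additionally sign-extend
   from bit 3, yielding -1 here.  */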
5380 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5381 HOST_WIDE_INT op1val = INTVAL (op1);
5382 HOST_WIDE_INT op2val = INTVAL (op2);
5383 if (BITS_BIG_ENDIAN)
5384 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5385 else
5386 val >>= op2val;
5388 if (HOST_BITS_PER_WIDE_INT != op1val)
5390 /* First zero-extend. */
5391 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5392 /* If desired, propagate sign bit. */
5393 if (code == SIGN_EXTRACT
5394 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5395 != 0)
5396 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5399 return gen_int_mode (val, mode);
5401 break;
5403 case IF_THEN_ELSE:
5404 if (CONST_INT_P (op0))
5405 return op0 != const0_rtx ? op1 : op2;
5407 /* Convert c ? a : a into "a". */
5408 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5409 return op1;
5411 /* Convert a != b ? a : b into "a". */
5412 if (GET_CODE (op0) == NE
5413 && ! side_effects_p (op0)
5414 && ! HONOR_NANS (mode)
5415 && ! HONOR_SIGNED_ZEROS (mode)
5416 && ((rtx_equal_p (XEXP (op0, 0), op1)
5417 && rtx_equal_p (XEXP (op0, 1), op2))
5418 || (rtx_equal_p (XEXP (op0, 0), op2)
5419 && rtx_equal_p (XEXP (op0, 1), op1))))
5420 return op1;
5422 /* Convert a == b ? a : b into "b". */
5423 if (GET_CODE (op0) == EQ
5424 && ! side_effects_p (op0)
5425 && ! HONOR_NANS (mode)
5426 && ! HONOR_SIGNED_ZEROS (mode)
5427 && ((rtx_equal_p (XEXP (op0, 0), op1)
5428 && rtx_equal_p (XEXP (op0, 1), op2))
5429 || (rtx_equal_p (XEXP (op0, 0), op2)
5430 && rtx_equal_p (XEXP (op0, 1), op1))))
5431 return op2;
5433 /* Convert (!c) != {0,...,0} ? a : b into
5434 c != {0,...,0} ? b : a for vector modes. */
5435 if (VECTOR_MODE_P (GET_MODE (op1))
5436 && GET_CODE (op0) == NE
5437 && GET_CODE (XEXP (op0, 0)) == NOT
5438 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5440 rtx cv = XEXP (op0, 1);
5441 int nunits = CONST_VECTOR_NUNITS (cv);
5442 bool ok = true;
5443 for (int i = 0; i < nunits; ++i)
5444 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5446 ok = false;
5447 break;
5449 if (ok)
5451 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5452 XEXP (XEXP (op0, 0), 0),
5453 XEXP (op0, 1));
5454 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5455 return retval;
5459 /* Convert x == 0 ? N : clz (x) into clz (x) when
5460 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5461 Similarly for ctz (x). */
5462 if (COMPARISON_P (op0) && !side_effects_p (op0)
5463 && XEXP (op0, 1) == const0_rtx)
5465 rtx simplified
5466 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5467 op1, op2);
5468 if (simplified)
5469 return simplified;
5472 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5474 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5475 ? GET_MODE (XEXP (op0, 1))
5476 : GET_MODE (XEXP (op0, 0)));
5477 rtx temp;
5479 /* Look for happy constants in op1 and op2. */
5480 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5482 HOST_WIDE_INT t = INTVAL (op1);
5483 HOST_WIDE_INT f = INTVAL (op2);
5485 if (t == STORE_FLAG_VALUE && f == 0)
5486 code = GET_CODE (op0);
5487 else if (t == 0 && f == STORE_FLAG_VALUE)
5489 enum rtx_code tmp;
5490 tmp = reversed_comparison_code (op0, NULL);
5491 if (tmp == UNKNOWN)
5492 break;
5493 code = tmp;
5495 else
5496 break;
5498 return simplify_gen_relational (code, mode, cmp_mode,
5499 XEXP (op0, 0), XEXP (op0, 1));
5502 if (cmp_mode == VOIDmode)
5503 cmp_mode = op0_mode;
5504 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5505 cmp_mode, XEXP (op0, 0),
5506 XEXP (op0, 1));
5508 /* See if any simplifications were possible. */
5509 if (temp)
5511 if (CONST_INT_P (temp))
5512 return temp == const0_rtx ? op2 : op1;
5513 else if (temp)
5514 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5517 break;
5519 case VEC_MERGE:
5520 gcc_assert (GET_MODE (op0) == mode);
5521 gcc_assert (GET_MODE (op1) == mode);
5522 gcc_assert (VECTOR_MODE_P (mode));
5523 trueop2 = avoid_constant_pool_reference (op2);
5524 if (CONST_INT_P (trueop2))
5526 int elt_size = GET_MODE_UNIT_SIZE (mode);
5527 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5528 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5529 unsigned HOST_WIDE_INT mask;
5530 if (n_elts == HOST_BITS_PER_WIDE_INT)
5531 mask = -1;
5532 else
5533 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5535 if (!(sel & mask) && !side_effects_p (op0))
5536 return op1;
5537 if ((sel & mask) == mask && !side_effects_p (op1))
5538 return op0;
5540 rtx trueop0 = avoid_constant_pool_reference (op0);
5541 rtx trueop1 = avoid_constant_pool_reference (op1);
5542 if (GET_CODE (trueop0) == CONST_VECTOR
5543 && GET_CODE (trueop1) == CONST_VECTOR)
5545 rtvec v = rtvec_alloc (n_elts);
5546 unsigned int i;
5548 for (i = 0; i < n_elts; i++)
5549 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5550 ? CONST_VECTOR_ELT (trueop0, i)
5551 : CONST_VECTOR_ELT (trueop1, i));
5552 return gen_rtx_CONST_VECTOR (mode, v);
5555 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5556 if no element from a appears in the result. */
5557 if (GET_CODE (op0) == VEC_MERGE)
5559 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5560 if (CONST_INT_P (tem))
5562 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5563 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5564 return simplify_gen_ternary (code, mode, mode,
5565 XEXP (op0, 1), op1, op2);
5566 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5567 return simplify_gen_ternary (code, mode, mode,
5568 XEXP (op0, 0), op1, op2);
5571 if (GET_CODE (op1) == VEC_MERGE)
5573 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5574 if (CONST_INT_P (tem))
5576 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5577 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5578 return simplify_gen_ternary (code, mode, mode,
5579 op0, XEXP (op1, 1), op2);
5580 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5581 return simplify_gen_ternary (code, mode, mode,
5582 op0, XEXP (op1, 0), op2);
5586 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5587 with a. */
5588 if (GET_CODE (op0) == VEC_DUPLICATE
5589 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5590 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5591 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5593 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5594 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5596 if (XEXP (XEXP (op0, 0), 0) == op1
5597 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5598 return op1;
5603 if (rtx_equal_p (op0, op1)
5604 && !side_effects_p (op2) && !side_effects_p (op1))
5605 return op0;
5607 break;
5609 default:
5610 gcc_unreachable ();
5613 return 0;
5616 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5617 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5618 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5620 Works by unpacking OP into a collection of 8-bit values
5621 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5622 and then repacking them again for OUTERMODE. */
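/* Rough illustration (assuming a little-endian target): taking the
   SImode subreg at BYTE 0 of the DImode constant 0x1122334455667788
   unpacks the value bytes 88 77 66 55 44 33 22 11, keeps the first
   four, and repacks them as (const_int 0x55667788).  */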
5624 static rtx
5625 simplify_immed_subreg (machine_mode outermode, rtx op,
5626 machine_mode innermode, unsigned int byte)
5628 enum {
5629 value_bit = 8,
5630 value_mask = (1 << value_bit) - 1
5632 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5633 int value_start;
5634 int i;
5635 int elem;
5637 int num_elem;
5638 rtx * elems;
5639 int elem_bitsize;
5640 rtx result_s;
5641 rtvec result_v = NULL;
5642 enum mode_class outer_class;
5643 machine_mode outer_submode;
5644 int max_bitsize;
5646 /* Some ports misuse CCmode. */
5647 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5648 return op;
5650 /* We have no way to represent a complex constant at the rtl level. */
5651 if (COMPLEX_MODE_P (outermode))
5652 return NULL_RTX;
5654 /* We support any size mode. */
5655 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5656 GET_MODE_BITSIZE (innermode));
5658 /* Unpack the value. */
5660 if (GET_CODE (op) == CONST_VECTOR)
5662 num_elem = CONST_VECTOR_NUNITS (op);
5663 elems = &CONST_VECTOR_ELT (op, 0);
5664 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5666 else
5668 num_elem = 1;
5669 elems = &op;
5670 elem_bitsize = max_bitsize;
5672 /* If this asserts, it is too complicated; reducing value_bit may help. */
5673 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5674 /* I don't know how to handle endianness of sub-units. */
5675 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5677 for (elem = 0; elem < num_elem; elem++)
5679 unsigned char * vp;
5680 rtx el = elems[elem];
5682 /* Vectors are kept in target memory order. (This is probably
5683 a mistake.) */
5685 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5686 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5687 / BITS_PER_UNIT);
5688 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5689 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5690 unsigned bytele = (subword_byte % UNITS_PER_WORD
5691 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5692 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5695 switch (GET_CODE (el))
5697 case CONST_INT:
5698 for (i = 0;
5699 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5700 i += value_bit)
5701 *vp++ = INTVAL (el) >> i;
5702 /* CONST_INTs are always logically sign-extended. */
5703 for (; i < elem_bitsize; i += value_bit)
5704 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5705 break;
5707 case CONST_WIDE_INT:
5709 rtx_mode_t val = std::make_pair (el, innermode);
5710 unsigned char extend = wi::sign_mask (val);
5712 for (i = 0; i < elem_bitsize; i += value_bit)
5713 *vp++ = wi::extract_uhwi (val, i, value_bit);
5714 for (; i < elem_bitsize; i += value_bit)
5715 *vp++ = extend;
5717 break;
5719 case CONST_DOUBLE:
5720 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5722 unsigned char extend = 0;
5723 /* If this triggers, someone should have generated a
5724 CONST_INT instead. */
5725 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5727 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5728 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5729 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5731 *vp++
5732 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5733 i += value_bit;
5736 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5737 extend = -1;
5738 for (; i < elem_bitsize; i += value_bit)
5739 *vp++ = extend;
5741 else
5743 /* This is big enough for anything on the platform. */
5744 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5745 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5747 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5748 gcc_assert (bitsize <= elem_bitsize);
5749 gcc_assert (bitsize % value_bit == 0);
5751 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5752 GET_MODE (el));
5754 /* real_to_target produces its result in words affected by
5755 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5756 and use WORDS_BIG_ENDIAN instead; see the documentation
5757 of SUBREG in rtl.texi. */
5758 for (i = 0; i < bitsize; i += value_bit)
5760 int ibase;
5761 if (WORDS_BIG_ENDIAN)
5762 ibase = bitsize - 1 - i;
5763 else
5764 ibase = i;
5765 *vp++ = tmp[ibase / 32] >> i % 32;
5768 /* It shouldn't matter what's done here, so fill it with
5769 zero. */
5770 for (; i < elem_bitsize; i += value_bit)
5771 *vp++ = 0;
5773 break;
5775 case CONST_FIXED:
5776 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5778 for (i = 0; i < elem_bitsize; i += value_bit)
5779 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5781 else
5783 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5784 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5785 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5786 i += value_bit)
5787 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5788 >> (i - HOST_BITS_PER_WIDE_INT);
5789 for (; i < elem_bitsize; i += value_bit)
5790 *vp++ = 0;
5792 break;
5794 default:
5795 gcc_unreachable ();
5799 /* Now, pick the right byte to start with. */
5800 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5801 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5802 will already have offset 0. */
5803 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5805 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5806 - byte);
5807 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5808 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5809 byte = (subword_byte % UNITS_PER_WORD
5810 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
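 /* For instance, on a big-endian target (both WORDS_ and BYTES_BIG_ENDIAN)
    a QImode subreg of an SImode value at byte 3 names the least
    significant byte, so BYTE is renumbered here from 3 to 0; on a
    little-endian target it is left unchanged.  (Illustrative numbers.)  */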
5813 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5814 so if it's become negative it will instead be very large.) */
5815 gcc_assert (byte < GET_MODE_SIZE (innermode));
5817 /* Convert from bytes to chunks of size value_bit. */
5818 value_start = byte * (BITS_PER_UNIT / value_bit);
5820 /* Re-pack the value. */
5821 num_elem = GET_MODE_NUNITS (outermode);
5823 if (VECTOR_MODE_P (outermode))
5825 result_v = rtvec_alloc (num_elem);
5826 elems = &RTVEC_ELT (result_v, 0);
5828 else
5829 elems = &result_s;
5831 outer_submode = GET_MODE_INNER (outermode);
5832 outer_class = GET_MODE_CLASS (outer_submode);
5833 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5835 gcc_assert (elem_bitsize % value_bit == 0);
5836 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5838 for (elem = 0; elem < num_elem; elem++)
5840 unsigned char *vp;
5842 /* Vectors are stored in target memory order. (This is probably
5843 a mistake.) */
5845 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5846 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5847 / BITS_PER_UNIT);
5848 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5849 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5850 unsigned bytele = (subword_byte % UNITS_PER_WORD
5851 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5852 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5855 switch (outer_class)
5857 case MODE_INT:
5858 case MODE_PARTIAL_INT:
5860 int u;
5861 int base = 0;
5862 int units
5863 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5864 / HOST_BITS_PER_WIDE_INT;
5865 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5866 wide_int r;
5868 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5869 return NULL_RTX;
5870 for (u = 0; u < units; u++)
5872 unsigned HOST_WIDE_INT buf = 0;
5873 for (i = 0;
5874 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5875 i += value_bit)
5876 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5878 tmp[u] = buf;
5879 base += HOST_BITS_PER_WIDE_INT;
5881 r = wide_int::from_array (tmp, units,
5882 GET_MODE_PRECISION (outer_submode));
5883 #if TARGET_SUPPORTS_WIDE_INT == 0
5884 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5885 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5886 return NULL_RTX;
5887 #endif
5888 elems[elem] = immed_wide_int_const (r, outer_submode);
5890 break;
5892 case MODE_FLOAT:
5893 case MODE_DECIMAL_FLOAT:
5895 REAL_VALUE_TYPE r;
5896 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5898 /* real_from_target wants its input in words affected by
5899 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5900 and use WORDS_BIG_ENDIAN instead; see the documentation
5901 of SUBREG in rtl.texi. */
5902 for (i = 0; i < max_bitsize / 32; i++)
5903 tmp[i] = 0;
5904 for (i = 0; i < elem_bitsize; i += value_bit)
5906 int ibase;
5907 if (WORDS_BIG_ENDIAN)
5908 ibase = elem_bitsize - 1 - i;
5909 else
5910 ibase = i;
5911 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5914 real_from_target (&r, tmp, outer_submode);
5915 elems[elem] = const_double_from_real_value (r, outer_submode);
5917 break;
5919 case MODE_FRACT:
5920 case MODE_UFRACT:
5921 case MODE_ACCUM:
5922 case MODE_UACCUM:
5924 FIXED_VALUE_TYPE f;
5925 f.data.low = 0;
5926 f.data.high = 0;
5927 f.mode = outer_submode;
5929 for (i = 0;
5930 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5931 i += value_bit)
5932 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5933 for (; i < elem_bitsize; i += value_bit)
5934 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5935 << (i - HOST_BITS_PER_WIDE_INT));
5937 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5939 break;
5941 default:
5942 gcc_unreachable ();
5945 if (VECTOR_MODE_P (outermode))
5946 return gen_rtx_CONST_VECTOR (outermode, result_v);
5947 else
5948 return result_s;
5951 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5952 Return 0 if no simplifications are possible. */
5954 simplify_subreg (machine_mode outermode, rtx op,
5955 machine_mode innermode, unsigned int byte)
5957 /* Little bit of sanity checking. */
5958 gcc_assert (innermode != VOIDmode);
5959 gcc_assert (outermode != VOIDmode);
5960 gcc_assert (innermode != BLKmode);
5961 gcc_assert (outermode != BLKmode);
5963 gcc_assert (GET_MODE (op) == innermode
5964 || GET_MODE (op) == VOIDmode);
5966 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5967 return NULL_RTX;
5969 if (byte >= GET_MODE_SIZE (innermode))
5970 return NULL_RTX;
5972 if (outermode == innermode && !byte)
5973 return op;
5975 if (CONST_SCALAR_INT_P (op)
5976 || CONST_DOUBLE_AS_FLOAT_P (op)
5977 || GET_CODE (op) == CONST_FIXED
5978 || GET_CODE (op) == CONST_VECTOR)
5979 return simplify_immed_subreg (outermode, op, innermode, byte);
5981 /* Changing mode twice with SUBREG => just change it once,
5982 or not at all if changing back to op's starting mode. */
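 /* For example, with little-endian offsets:
    (subreg:QI (subreg:HI (reg:SI x) 0) 0) can become (subreg:QI (reg:SI x) 0),
    and (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to (reg:SI x).
    (Hypothetical pseudo X, shown only as a sketch.)  */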
5983 if (GET_CODE (op) == SUBREG)
5985 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5986 int final_offset = byte + SUBREG_BYTE (op);
5987 rtx newx;
5989 if (outermode == innermostmode
5990 && byte == 0 && SUBREG_BYTE (op) == 0)
5991 return SUBREG_REG (op);
5993 /* The SUBREG_BYTE represents the offset, as if the value were stored
5994 in memory. The irritating exception is the paradoxical subreg, where
5995 we define SUBREG_BYTE to be 0; on big-endian machines this
5996 value would otherwise be negative. For a moment, undo this exception. */
5997 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5999 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6000 if (WORDS_BIG_ENDIAN)
6001 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6002 if (BYTES_BIG_ENDIAN)
6003 final_offset += difference % UNITS_PER_WORD;
6005 if (SUBREG_BYTE (op) == 0
6006 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6008 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6009 if (WORDS_BIG_ENDIAN)
6010 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6011 if (BYTES_BIG_ENDIAN)
6012 final_offset += difference % UNITS_PER_WORD;
6015 /* See whether resulting subreg will be paradoxical. */
6016 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
6018 /* In nonparadoxical subregs we can't handle negative offsets. */
6019 if (final_offset < 0)
6020 return NULL_RTX;
6021 /* Bail out in case resulting subreg would be incorrect. */
6022 if (final_offset % GET_MODE_SIZE (outermode)
6023 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6024 return NULL_RTX;
6026 else
6028 int offset = 0;
6029 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6031 /* In a paradoxical subreg, see if we are still looking at the lower part.
6032 If so, our SUBREG_BYTE will be 0. */
6033 if (WORDS_BIG_ENDIAN)
6034 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6035 if (BYTES_BIG_ENDIAN)
6036 offset += difference % UNITS_PER_WORD;
6037 if (offset == final_offset)
6038 final_offset = 0;
6039 else
6040 return NULL_RTX;
6043 /* Recurse for further possible simplifications. */
6044 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6045 final_offset);
6046 if (newx)
6047 return newx;
6048 if (validate_subreg (outermode, innermostmode,
6049 SUBREG_REG (op), final_offset))
6051 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6052 if (SUBREG_PROMOTED_VAR_P (op)
6053 && SUBREG_PROMOTED_SIGN (op) >= 0
6054 && GET_MODE_CLASS (outermode) == MODE_INT
6055 && IN_RANGE (GET_MODE_SIZE (outermode),
6056 GET_MODE_SIZE (innermode),
6057 GET_MODE_SIZE (innermostmode))
6058 && subreg_lowpart_p (newx))
6060 SUBREG_PROMOTED_VAR_P (newx) = 1;
6061 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6063 return newx;
6065 return NULL_RTX;
6068 /* SUBREG of a hard register => just change the register number
6069 and/or mode. If the hard register is not valid in that mode,
6070 suppress this simplification. If the hard register is the stack,
6071 frame, or argument pointer, leave this as a SUBREG. */
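 /* E.g. on a target where hard register 0 can hold both DImode and SImode,
    (subreg:SI (reg:DI 0) 0) may be rewritten as (reg:SI 0); the register
    number actually chosen is computed by simplify_subreg_regno and is
    target-specific.  (Illustrative only.)  */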
6073 if (REG_P (op) && HARD_REGISTER_P (op))
6075 unsigned int regno, final_regno;
6077 regno = REGNO (op);
6078 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6079 if (HARD_REGISTER_NUM_P (final_regno))
6081 rtx x;
6082 int final_offset = byte;
6084 /* Adjust offset for paradoxical subregs. */
6085 if (byte == 0
6086 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6088 int difference = (GET_MODE_SIZE (innermode)
6089 - GET_MODE_SIZE (outermode));
6090 if (WORDS_BIG_ENDIAN)
6091 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6092 if (BYTES_BIG_ENDIAN)
6093 final_offset += difference % UNITS_PER_WORD;
6096 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6098 /* Propagate original regno. We don't have any way to specify
6099 the offset inside original regno, so do so only for lowpart.
6100 The information is used only by alias analysis, which cannot
6101 grok a partial register anyway. */
6103 if (subreg_lowpart_offset (outermode, innermode) == byte)
6104 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6105 return x;
6109 /* If we have a SUBREG of a register that we are replacing and we are
6110 replacing it with a MEM, make a new MEM and try replacing the
6111 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6112 or if we would be widening it. */
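 /* For instance, (subreg:SI (mem:DI X) 4) would typically become an SImode
    MEM whose address is X plus 4; adjust_address_nv below performs the
    offsetting.  (X is a placeholder address, shown only as a sketch.)  */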
6114 if (MEM_P (op)
6115 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6116 /* Allow splitting of volatile memory references in case we don't
6117 have an instruction to move the whole thing. */
6118 && (! MEM_VOLATILE_P (op)
6119 || ! have_insn_for (SET, innermode))
6120 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6121 return adjust_address_nv (op, outermode, byte);
6123 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6124 of two parts. */
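 /* For example, for a complex value (concat:SC x y) with 4-byte SFmode
    parts, (subreg:SF ... 0) selects X and (subreg:SF ... 4) selects Y,
    while an offset that straddles the two parts yields no simplification.
    (Illustrative; X and Y are placeholders.)  */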
6125 if (GET_CODE (op) == CONCAT
6126 || GET_CODE (op) == VEC_CONCAT)
6128 unsigned int part_size, final_offset;
6129 rtx part, res;
6131 enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
6132 if (part_mode == VOIDmode)
6133 part_mode = GET_MODE_INNER (GET_MODE (op));
6134 part_size = GET_MODE_SIZE (part_mode);
6135 if (byte < part_size)
6137 part = XEXP (op, 0);
6138 final_offset = byte;
6140 else
6142 part = XEXP (op, 1);
6143 final_offset = byte - part_size;
6146 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6147 return NULL_RTX;
6149 part_mode = GET_MODE (part);
6150 if (part_mode == VOIDmode)
6151 part_mode = GET_MODE_INNER (GET_MODE (op));
6152 res = simplify_subreg (outermode, part, part_mode, final_offset);
6153 if (res)
6154 return res;
6155 if (validate_subreg (outermode, part_mode, part, final_offset))
6156 return gen_rtx_SUBREG (outermode, part, final_offset);
6157 return NULL_RTX;
6160 /* A SUBREG resulting from a zero extension may fold to zero if
6161 it extracts higher bits than the ZERO_EXTEND's source provides. */
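 /* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI x)) 4)
    reads only the zero-filled upper half of the DImode value, so it folds
    to (const_int 0).  (Hypothetical pseudo X, for illustration.)  */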
6162 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6164 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6165 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6166 return CONST0_RTX (outermode);
6169 if (SCALAR_INT_MODE_P (outermode)
6170 && SCALAR_INT_MODE_P (innermode)
6171 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6172 && byte == subreg_lowpart_offset (outermode, innermode))
6174 rtx tem = simplify_truncation (outermode, op, innermode);
6175 if (tem)
6176 return tem;
6179 return NULL_RTX;
6182 /* Make a SUBREG operation or equivalent if it folds. */
6185 simplify_gen_subreg (machine_mode outermode, rtx op,
6186 machine_mode innermode, unsigned int byte)
6188 rtx newx;
6190 newx = simplify_subreg (outermode, op, innermode, byte);
6191 if (newx)
6192 return newx;
6194 if (GET_CODE (op) == SUBREG
6195 || GET_CODE (op) == CONCAT
6196 || GET_MODE (op) == VOIDmode)
6197 return NULL_RTX;
6199 if (validate_subreg (outermode, innermode, op, byte))
6200 return gen_rtx_SUBREG (outermode, op, byte);
6202 return NULL_RTX;
6205 /* Generate a subreg to get the least significant part of EXPR (in mode
6206 INNER_MODE) in OUTER_MODE. */
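 /* For instance, lowpart_subreg (SImode, x, DImode) asks for the low SImode
    half of a DImode value: offset 0 on a little-endian target, offset 4 on
    a big-endian one.  (Illustrative; X is a placeholder expression.)  */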
6209 lowpart_subreg (machine_mode outer_mode, rtx expr,
6210 machine_mode inner_mode)
6212 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6213 subreg_lowpart_offset (outer_mode, inner_mode));
6216 /* Simplify X, an rtx expression.
6218 Return the simplified expression or NULL if no simplifications
6219 were possible.
6221 This is the preferred entry point into the simplification routines;
6222 however, we still allow passes to call the more specific routines.
6224 Right now GCC has three (yes, three) major bodies of RTL simplification
6225 code that need to be unified.
6227 1. fold_rtx in cse.c. This code uses various CSE specific
6228 information to aid in RTL simplification.
6230 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6231 it uses combine specific information to aid in RTL
6232 simplification.
6234 3. The routines in this file.
6237 Long term we want to only have one body of simplification code; to
6238 get to that state I recommend the following steps:
6240 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6241 which are not pass dependent state into these routines.
6243 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6244 use this routine whenever possible.
6246 3. Allow for pass dependent state to be provided to these
6247 routines and add simplifications based on the pass dependent
6248 state. Remove code from cse.c & combine.c that becomes
6249 redundant/dead.
6251 It will take time, but ultimately the compiler will be easier to
6252 maintain and improve. It's totally silly that when we add a
6253 simplification it needs to be added to 4 places (3 for RTL
6254 simplification and 1 for tree simplification). */
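 /* As a small illustration of the dispatch below: simplify_rtx applied to
    (plus:SI (reg:SI x) (const_int 0)) is routed to
    simplify_binary_operation and would typically fold to (reg:SI x).
    (Hypothetical pseudo X, shown only as a sketch.)  */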
6257 simplify_rtx (const_rtx x)
6259 const enum rtx_code code = GET_CODE (x);
6260 const machine_mode mode = GET_MODE (x);
6262 switch (GET_RTX_CLASS (code))
6264 case RTX_UNARY:
6265 return simplify_unary_operation (code, mode,
6266 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6267 case RTX_COMM_ARITH:
6268 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6269 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6271 /* Fall through. */
6273 case RTX_BIN_ARITH:
6274 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6276 case RTX_TERNARY:
6277 case RTX_BITFIELD_OPS:
6278 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6279 XEXP (x, 0), XEXP (x, 1),
6280 XEXP (x, 2));
6282 case RTX_COMPARE:
6283 case RTX_COMM_COMPARE:
6284 return simplify_relational_operation (code, mode,
6285 ((GET_MODE (XEXP (x, 0))
6286 != VOIDmode)
6287 ? GET_MODE (XEXP (x, 0))
6288 : GET_MODE (XEXP (x, 1))),
6289 XEXP (x, 0),
6290 XEXP (x, 1));
6292 case RTX_EXTRA:
6293 if (code == SUBREG)
6294 return simplify_subreg (mode, SUBREG_REG (x),
6295 GET_MODE (SUBREG_REG (x)),
6296 SUBREG_BYTE (x));
6297 break;
6299 case RTX_OBJ:
6300 if (code == LO_SUM)
6302 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6303 if (GET_CODE (XEXP (x, 0)) == HIGH
6304 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6305 return XEXP (x, 1);
6307 break;
6309 default:
6310 break;
6312 return NULL;