[31/77] Use scalar_int_mode for move2add
[official-gcc.git] / gcc / simplify-rtx.c
blob 2255ccf1408adf85775e73111a5426dad6d32af0
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
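/* As an illustration (assuming a two's-complement HOST_WIDE_INT):
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -1) yields HOST_WIDE_INT_M1, while
   HWI_SIGN_EXTEND (42) yields HOST_WIDE_INT_0, i.e. the high half of the
   pair is filled with copies of LOW's sign bit.  */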
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
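/* For illustration: neg_const_int (SImode, GEN_INT (5)) would yield
   (const_int -5) via gen_int_mode.  The simplify_const_unary_operation
   path is only taken when -UINTVAL (I) == UINTVAL (I) in a mode wider
   than HOST_WIDE_INT, where the full wide-int machinery is needed to
   build the multi-word result.  */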
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
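/* For example, in 32-bit SImode the masked value 0x80000000 satisfies
   mode_signbit_p, while 0x40000000 and 0x80000001 do not: only the single
   most significant bit of the mode may be set.  */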
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
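/* Usage sketch (illustrative operands): a call such as
     simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   is expected to fold to X itself rather than build (plus:SI X (const_int 0)),
   whereas a combination that cannot be simplified falls through to
   gen_rtx_fmt_ee with the operands in canonical order.  */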
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
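/* For illustration: if X is (mem:DF (symbol_ref S)) and S addresses a
   constant-pool entry holding a DFmode CONST_DOUBLE, that CONST_DOUBLE is
   returned directly; a narrower-mode or offset access goes through
   simplify_subreg instead, and anything else falls back to the original
   MEM.  */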
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X; if it returns non-NULL, replace X with its return value and simplify the
429 result. */
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
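/* A concrete sketch of the two forms above: an SImode rvalue truncation of
   (reg:DI X) can be written as the lowpart subreg (subreg:SI (reg:DI X) 0)
   on a little-endian target -- but only once the caller has proven the
   truncation needs no action -- or as (truncate:SI (reg:DI X)), leaving
   simplify_gen_unary (TRUNCATE, ...) to choose the valid representation.  */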
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867 in mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
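/* For illustration: with MODE == QImode and
   OP == (zero_extend:SI (reg:QI X)), the extension case at the top of this
   function returns (reg:QI X) directly, since MODE matches the mode of the
   extension's operand.  */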
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
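/* For example, OP == (float:SF (reg:HI X)) is exact: IN_BITS is at most 16
   while SFmode's significand holds 24 bits.  A full 32-bit SImode source,
   by contrast, need not fit in 24 significand bits unless nonzero_bits or
   sign-bit copies show it is narrower.  */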
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op0_mode;
931 switch (code)
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
998 if (GET_CODE (op) == SUBREG
999 && subreg_lowpart_p (op)
1000 && (GET_MODE_SIZE (GET_MODE (op))
1001 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
1002 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1003 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1005 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1006 rtx x;
1008 x = gen_rtx_ROTATE (inner_mode,
1009 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1010 inner_mode),
1011 XEXP (SUBREG_REG (op), 1));
1012 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1013 if (temp)
1014 return temp;
1017 /* Apply De Morgan's laws to reduce number of patterns for machines
1018 with negating logical insns (and-not, nand, etc.). If result has
1019 only one NOT, put it first, since that is how the patterns are
1020 coded. */
1021 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1023 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1024 machine_mode op_mode;
1026 op_mode = GET_MODE (in1);
1027 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1029 op_mode = GET_MODE (in2);
1030 if (op_mode == VOIDmode)
1031 op_mode = mode;
1032 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1034 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1035 std::swap (in1, in2);
1037 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1038 mode, in1, in2);
1041 /* (not (bswap x)) -> (bswap (not x)). */
1042 if (GET_CODE (op) == BSWAP)
1044 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1045 return simplify_gen_unary (BSWAP, mode, x, mode);
1047 break;
1049 case NEG:
1050 /* (neg (neg X)) == X. */
1051 if (GET_CODE (op) == NEG)
1052 return XEXP (op, 0);
1054 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1055 If comparison is not reversible use
1056 x ? y : (neg y). */
1057 if (GET_CODE (op) == IF_THEN_ELSE)
1059 rtx cond = XEXP (op, 0);
1060 rtx true_rtx = XEXP (op, 1);
1061 rtx false_rtx = XEXP (op, 2);
1063 if ((GET_CODE (true_rtx) == NEG
1064 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1065 || (GET_CODE (false_rtx) == NEG
1066 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1068 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1069 temp = reversed_comparison (cond, mode);
1070 else
1072 temp = cond;
1073 std::swap (true_rtx, false_rtx);
1075 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1076 mode, temp, true_rtx, false_rtx);
1080 /* (neg (plus X 1)) can become (not X). */
1081 if (GET_CODE (op) == PLUS
1082 && XEXP (op, 1) == const1_rtx)
1083 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1085 /* Similarly, (neg (not X)) is (plus X 1). */
1086 if (GET_CODE (op) == NOT)
1087 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1088 CONST1_RTX (mode));
1090 /* (neg (minus X Y)) can become (minus Y X). This transformation
1091 isn't safe for modes with signed zeros, since if X and Y are
1092 both +0, (minus Y X) is the same as (minus X Y). If the
1093 rounding mode is towards +infinity (or -infinity) then the two
1094 expressions will be rounded differently. */
1095 if (GET_CODE (op) == MINUS
1096 && !HONOR_SIGNED_ZEROS (mode)
1097 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1098 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1100 if (GET_CODE (op) == PLUS
1101 && !HONOR_SIGNED_ZEROS (mode)
1102 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 /* (neg (plus A C)) is simplified to (minus -C A). */
1105 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1106 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1108 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1109 if (temp)
1110 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1113 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1114 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1115 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1118 /* (neg (mult A B)) becomes (mult A (neg B)).
1119 This works even for floating-point values. */
1120 if (GET_CODE (op) == MULT
1121 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1123 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1124 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1127 /* NEG commutes with ASHIFT since it is multiplication. Only do
1128 this if we can then eliminate the NEG (e.g., if the operand
1129 is a constant). */
1130 if (GET_CODE (op) == ASHIFT)
1132 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1133 if (temp)
1134 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1137 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1138 C is equal to the width of MODE minus 1. */
1139 if (GET_CODE (op) == ASHIFTRT
1140 && CONST_INT_P (XEXP (op, 1))
1141 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1142 return simplify_gen_binary (LSHIFTRT, mode,
1143 XEXP (op, 0), XEXP (op, 1));
1145 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1146 C is equal to the width of MODE minus 1. */
1147 if (GET_CODE (op) == LSHIFTRT
1148 && CONST_INT_P (XEXP (op, 1))
1149 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1150 return simplify_gen_binary (ASHIFTRT, mode,
1151 XEXP (op, 0), XEXP (op, 1));
1153 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1154 if (GET_CODE (op) == XOR
1155 && XEXP (op, 1) == const1_rtx
1156 && nonzero_bits (XEXP (op, 0), mode) == 1)
1157 return plus_constant (mode, XEXP (op, 0), -1);
1159 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1160 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1161 if (GET_CODE (op) == LT
1162 && XEXP (op, 1) == const0_rtx
1163 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1165 int isize = GET_MODE_PRECISION (inner);
1166 if (STORE_FLAG_VALUE == 1)
1168 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1169 GEN_INT (isize - 1));
1170 if (mode == inner)
1171 return temp;
1172 if (GET_MODE_PRECISION (mode) > isize)
1173 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1174 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1176 else if (STORE_FLAG_VALUE == -1)
1178 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1179 GEN_INT (isize - 1));
1180 if (mode == inner)
1181 return temp;
1182 if (GET_MODE_PRECISION (mode) > isize)
1183 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1184 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1187 break;
1189 case TRUNCATE:
1190 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1191 with the umulXi3_highpart patterns. */
1192 if (GET_CODE (op) == LSHIFTRT
1193 && GET_CODE (XEXP (op, 0)) == MULT)
1194 break;
1196 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1198 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1200 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1201 if (temp)
1202 return temp;
1204 /* We can't handle truncation to a partial integer mode here
1205 because we don't know the real bitsize of the partial
1206 integer mode. */
1207 break;
1210 if (GET_MODE (op) != VOIDmode)
1212 temp = simplify_truncation (mode, op, GET_MODE (op));
1213 if (temp)
1214 return temp;
1217 /* If we know that the value is already truncated, we can
1218 replace the TRUNCATE with a SUBREG. */
1219 if (GET_MODE_NUNITS (mode) == 1
1220 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1221 || truncated_to_mode (mode, op)))
1223 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1224 if (temp)
1225 return temp;
1228 /* A truncate of a comparison can be replaced with a subreg if
1229 STORE_FLAG_VALUE permits. This is like the previous test,
1230 but it works even if the comparison is done in a mode larger
1231 than HOST_BITS_PER_WIDE_INT. */
1232 if (HWI_COMPUTABLE_MODE_P (mode)
1233 && COMPARISON_P (op)
1234 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1236 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1237 if (temp)
1238 return temp;
1241 /* A truncate of a memory is just loading the low part of the memory
1242 if we are not changing the meaning of the address. */
1243 if (GET_CODE (op) == MEM
1244 && !VECTOR_MODE_P (mode)
1245 && !MEM_VOLATILE_P (op)
1246 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1248 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1249 if (temp)
1250 return temp;
1253 break;
1255 case FLOAT_TRUNCATE:
1256 if (DECIMAL_FLOAT_MODE_P (mode))
1257 break;
1259 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1260 if (GET_CODE (op) == FLOAT_EXTEND
1261 && GET_MODE (XEXP (op, 0)) == mode)
1262 return XEXP (op, 0);
1264 /* (float_truncate:SF (float_truncate:DF foo:XF))
1265 = (float_truncate:SF foo:XF).
1266 This may eliminate double rounding, so it is unsafe.
1268 (float_truncate:SF (float_extend:XF foo:DF))
1269 = (float_truncate:SF foo:DF).
1271 (float_truncate:DF (float_extend:XF foo:SF))
1272 = (float_extend:DF foo:SF). */
1273 if ((GET_CODE (op) == FLOAT_TRUNCATE
1274 && flag_unsafe_math_optimizations)
1275 || GET_CODE (op) == FLOAT_EXTEND)
1276 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1277 0)))
1278 > GET_MODE_SIZE (mode)
1279 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1280 mode,
1281 XEXP (op, 0), mode);
1283 /* (float_truncate (float x)) is (float x) */
1284 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1285 && (flag_unsafe_math_optimizations
1286 || exact_int_to_float_conversion_p (op)))
1287 return simplify_gen_unary (GET_CODE (op), mode,
1288 XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1291 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1292 (OP:SF foo:SF) if OP is NEG or ABS. */
1293 if ((GET_CODE (op) == ABS
1294 || GET_CODE (op) == NEG)
1295 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1296 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1297 return simplify_gen_unary (GET_CODE (op), mode,
1298 XEXP (XEXP (op, 0), 0), mode);
1300 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1301 is (float_truncate:SF x). */
1302 if (GET_CODE (op) == SUBREG
1303 && subreg_lowpart_p (op)
1304 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1305 return SUBREG_REG (op);
1306 break;
1308 case FLOAT_EXTEND:
1309 if (DECIMAL_FLOAT_MODE_P (mode))
1310 break;
1312 /* (float_extend (float_extend x)) is (float_extend x)
1314 (float_extend (float x)) is (float x) assuming that double
1315 rounding can't happen. */
1317 if (GET_CODE (op) == FLOAT_EXTEND
1318 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1319 && exact_int_to_float_conversion_p (op)))
1320 return simplify_gen_unary (GET_CODE (op), mode,
1321 XEXP (op, 0),
1322 GET_MODE (XEXP (op, 0)));
1324 break;
1326 case ABS:
1327 /* (abs (neg <foo>)) -> (abs <foo>) */
1328 if (GET_CODE (op) == NEG)
1329 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1330 GET_MODE (XEXP (op, 0)));
1332 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1333 do nothing. */
1334 if (GET_MODE (op) == VOIDmode)
1335 break;
1337 /* If operand is something known to be positive, ignore the ABS. */
1338 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1339 || val_signbit_known_clear_p (GET_MODE (op),
1340 nonzero_bits (op, GET_MODE (op))))
1341 return op;
1343 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1344 if (is_a <scalar_int_mode> (mode, &int_mode)
1345 && (num_sign_bit_copies (op, int_mode)
1346 == GET_MODE_PRECISION (int_mode)))
1347 return gen_rtx_NEG (int_mode, op);
1349 break;
1351 case FFS:
1352 /* (ffs (*_extend <X>)) = (ffs <X>) */
1353 if (GET_CODE (op) == SIGN_EXTEND
1354 || GET_CODE (op) == ZERO_EXTEND)
1355 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1356 GET_MODE (XEXP (op, 0)));
1357 break;
1359 case POPCOUNT:
1360 switch (GET_CODE (op))
1362 case BSWAP:
1363 case ZERO_EXTEND:
1364 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1365 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1366 GET_MODE (XEXP (op, 0)));
1368 case ROTATE:
1369 case ROTATERT:
1370 /* Rotations don't affect popcount. */
1371 if (!side_effects_p (XEXP (op, 1)))
1372 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1373 GET_MODE (XEXP (op, 0)));
1374 break;
1376 default:
1377 break;
1379 break;
1381 case PARITY:
1382 switch (GET_CODE (op))
1384 case NOT:
1385 case BSWAP:
1386 case ZERO_EXTEND:
1387 case SIGN_EXTEND:
1388 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1389 GET_MODE (XEXP (op, 0)));
1391 case ROTATE:
1392 case ROTATERT:
1393 /* Rotations don't affect parity. */
1394 if (!side_effects_p (XEXP (op, 1)))
1395 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1396 GET_MODE (XEXP (op, 0)));
1397 break;
1399 default:
1400 break;
1402 break;
1404 case BSWAP:
1405 /* (bswap (bswap x)) -> x. */
1406 if (GET_CODE (op) == BSWAP)
1407 return XEXP (op, 0);
1408 break;
1410 case FLOAT:
1411 /* (float (sign_extend <X>)) = (float <X>). */
1412 if (GET_CODE (op) == SIGN_EXTEND)
1413 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1414 GET_MODE (XEXP (op, 0)));
1415 break;
1417 case SIGN_EXTEND:
1418 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1419 becomes just the MINUS if its mode is MODE. This allows
1420 folding switch statements on machines using casesi (such as
1421 the VAX). */
1422 if (GET_CODE (op) == TRUNCATE
1423 && GET_MODE (XEXP (op, 0)) == mode
1424 && GET_CODE (XEXP (op, 0)) == MINUS
1425 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1426 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1427 return XEXP (op, 0);
1429 /* Extending a widening multiplication should be canonicalized to
1430 a wider widening multiplication. */
1431 if (GET_CODE (op) == MULT)
1433 rtx lhs = XEXP (op, 0);
1434 rtx rhs = XEXP (op, 1);
1435 enum rtx_code lcode = GET_CODE (lhs);
1436 enum rtx_code rcode = GET_CODE (rhs);
1438 /* Widening multiplies usually extend both operands, but sometimes
1439 they use a shift to extract a portion of a register. */
1440 if ((lcode == SIGN_EXTEND
1441 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1442 && (rcode == SIGN_EXTEND
1443 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1445 machine_mode lmode = GET_MODE (lhs);
1446 machine_mode rmode = GET_MODE (rhs);
1447 int bits;
1449 if (lcode == ASHIFTRT)
1450 /* Number of bits not shifted off the end. */
1451 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1452 else /* lcode == SIGN_EXTEND */
1453 /* Size of inner mode. */
1454 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1456 if (rcode == ASHIFTRT)
1457 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1458 else /* rcode == SIGN_EXTEND */
1459 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1461 /* We can only widen multiplies if the result is mathematically
1462 equivalent. I.e. if overflow was impossible. */
1463 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1464 return simplify_gen_binary
1465 (MULT, mode,
1466 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1467 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1471 /* Check for a sign extension of a subreg of a promoted
1472 variable, where the promotion is sign-extended, and the
1473 target mode is the same as the variable's promotion. */
1474 if (GET_CODE (op) == SUBREG
1475 && SUBREG_PROMOTED_VAR_P (op)
1476 && SUBREG_PROMOTED_SIGNED_P (op)
1477 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1479 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1480 if (temp)
1481 return temp;
1484 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1485 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1486 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1488 gcc_assert (GET_MODE_PRECISION (mode)
1489 > GET_MODE_PRECISION (GET_MODE (op)));
1490 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1491 GET_MODE (XEXP (op, 0)));
1494 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1496 GET_MODE_BITSIZE (N) - I bits.
1497 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is similarly (zero_extend:M (subreg:O <X>)). */
1499 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1500 && GET_CODE (XEXP (op, 0)) == ASHIFT
1501 && is_a <scalar_int_mode> (mode, &int_mode)
1502 && CONST_INT_P (XEXP (op, 1))
1503 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1504 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1506 scalar_int_mode tmode;
1507 gcc_assert (GET_MODE_BITSIZE (int_mode)
1508 > GET_MODE_BITSIZE (GET_MODE (op)));
1509 if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1510 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1512 rtx inner =
1513 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1514 if (inner)
1515 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1516 ? SIGN_EXTEND : ZERO_EXTEND,
1517 int_mode, inner, tmode);
1521 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1522 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1523 if (GET_CODE (op) == LSHIFTRT
1524 && CONST_INT_P (XEXP (op, 1))
1525 && XEXP (op, 1) != const0_rtx)
1526 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1528 #if defined(POINTERS_EXTEND_UNSIGNED)
1529 /* As we do not know which address space the pointer is referring to,
1530 we can do this only if the target does not support different pointer
1531 or address modes depending on the address space. */
1532 if (target_default_pointer_address_modes_p ()
1533 && ! POINTERS_EXTEND_UNSIGNED
1534 && mode == Pmode && GET_MODE (op) == ptr_mode
1535 && (CONSTANT_P (op)
1536 || (GET_CODE (op) == SUBREG
1537 && REG_P (SUBREG_REG (op))
1538 && REG_POINTER (SUBREG_REG (op))
1539 && GET_MODE (SUBREG_REG (op)) == Pmode))
1540 && !targetm.have_ptr_extend ())
1542 temp
1543 = convert_memory_address_addr_space_1 (Pmode, op,
1544 ADDR_SPACE_GENERIC, false,
1545 true);
1546 if (temp)
1547 return temp;
1549 #endif
1550 break;
1552 case ZERO_EXTEND:
1553 /* Check for a zero extension of a subreg of a promoted
1554 variable, where the promotion is zero-extended, and the
1555 target mode is the same as the variable's promotion. */
1556 if (GET_CODE (op) == SUBREG
1557 && SUBREG_PROMOTED_VAR_P (op)
1558 && SUBREG_PROMOTED_UNSIGNED_P (op)
1559 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1561 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1562 if (temp)
1563 return temp;
1566 /* Extending a widening multiplication should be canonicalized to
1567 a wider widening multiplication. */
1568 if (GET_CODE (op) == MULT)
1570 rtx lhs = XEXP (op, 0);
1571 rtx rhs = XEXP (op, 1);
1572 enum rtx_code lcode = GET_CODE (lhs);
1573 enum rtx_code rcode = GET_CODE (rhs);
1575 /* Widening multiplies usually extend both operands, but sometimes
1576 they use a shift to extract a portion of a register. */
1577 if ((lcode == ZERO_EXTEND
1578 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1579 && (rcode == ZERO_EXTEND
1580 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1582 machine_mode lmode = GET_MODE (lhs);
1583 machine_mode rmode = GET_MODE (rhs);
1584 int bits;
1586 if (lcode == LSHIFTRT)
1587 /* Number of bits not shifted off the end. */
1588 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1589 else /* lcode == ZERO_EXTEND */
1590 /* Size of inner mode. */
1591 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1593 if (rcode == LSHIFTRT)
1594 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1595 else /* rcode == ZERO_EXTEND */
1596 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1598 /* We can only widen multiplies if the result is mathematically
1599 equivalent. I.e. if overflow was impossible. */
1600 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1601 return simplify_gen_binary
1602 (MULT, mode,
1603 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1604 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1608 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1609 if (GET_CODE (op) == ZERO_EXTEND)
1610 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1611 GET_MODE (XEXP (op, 0)));
1613 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1614 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1615 GET_MODE_PRECISION (N) - I bits. */
1616 if (GET_CODE (op) == LSHIFTRT
1617 && GET_CODE (XEXP (op, 0)) == ASHIFT
1618 && is_a <scalar_int_mode> (mode, &int_mode)
1619 && CONST_INT_P (XEXP (op, 1))
1620 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1621 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1623 scalar_int_mode tmode;
1624 if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1625 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1627 rtx inner =
1628 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1629 if (inner)
1630 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1631 inner, tmode);
1635 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1636 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1637 of mode N. E.g.
1638 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1639 (and:SI (reg:SI) (const_int 63)). */
1640 if (GET_CODE (op) == SUBREG
1641 && is_a <scalar_int_mode> (mode, &int_mode)
1642 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1643 && GET_MODE_PRECISION (GET_MODE (op)) < GET_MODE_PRECISION (op0_mode)
1644 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1645 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1646 && subreg_lowpart_p (op)
1647 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1648 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1650 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1651 return SUBREG_REG (op);
1652 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1653 op0_mode);
1656 #if defined(POINTERS_EXTEND_UNSIGNED)
1657 /* As we do not know which address space the pointer is referring to,
1658 we can do this only if the target does not support different pointer
1659 or address modes depending on the address space. */
1660 if (target_default_pointer_address_modes_p ()
1661 && POINTERS_EXTEND_UNSIGNED > 0
1662 && mode == Pmode && GET_MODE (op) == ptr_mode
1663 && (CONSTANT_P (op)
1664 || (GET_CODE (op) == SUBREG
1665 && REG_P (SUBREG_REG (op))
1666 && REG_POINTER (SUBREG_REG (op))
1667 && GET_MODE (SUBREG_REG (op)) == Pmode))
1668 && !targetm.have_ptr_extend ())
1670 temp
1671 = convert_memory_address_addr_space_1 (Pmode, op,
1672 ADDR_SPACE_GENERIC, false,
1673 true);
1674 if (temp)
1675 return temp;
1677 #endif
1678 break;
1680 default:
1681 break;
1684 return 0;
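/* For illustration: passing OP == (not:SI (reg:SI X)) with code NOT returns
   (reg:SI X) via the first NOT case above, while an OP that matches none of
   the patterns reaches this final return 0.  */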
1687 /* Try to compute the value of a unary operation CODE whose output mode is to
1688 be MODE with input operand OP whose mode was originally OP_MODE.
1689 Return zero if the value cannot be computed. */
1691 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1692 rtx op, machine_mode op_mode)
1694 unsigned int width = GET_MODE_PRECISION (mode);
1696 if (code == VEC_DUPLICATE)
1698 gcc_assert (VECTOR_MODE_P (mode));
1699 if (GET_MODE (op) != VOIDmode)
1701 if (!VECTOR_MODE_P (GET_MODE (op)))
1702 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1703 else
1704 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1705 (GET_MODE (op)));
1707 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1708 || GET_CODE (op) == CONST_VECTOR)
1710 int elt_size = GET_MODE_UNIT_SIZE (mode);
1711 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1712 rtvec v = rtvec_alloc (n_elts);
1713 unsigned int i;
1715 if (GET_CODE (op) != CONST_VECTOR)
1716 for (i = 0; i < n_elts; i++)
1717 RTVEC_ELT (v, i) = op;
1718 else
1720 machine_mode inmode = GET_MODE (op);
1721 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1722 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1724 gcc_assert (in_n_elts < n_elts);
1725 gcc_assert ((n_elts % in_n_elts) == 0);
1726 for (i = 0; i < n_elts; i++)
1727 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1729 return gen_rtx_CONST_VECTOR (mode, v);
1733 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1735 int elt_size = GET_MODE_UNIT_SIZE (mode);
1736 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1737 machine_mode opmode = GET_MODE (op);
1738 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1739 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1740 rtvec v = rtvec_alloc (n_elts);
1741 unsigned int i;
1743 gcc_assert (op_n_elts == n_elts);
1744 for (i = 0; i < n_elts; i++)
1746 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1747 CONST_VECTOR_ELT (op, i),
1748 GET_MODE_INNER (opmode));
1749 if (!x)
1750 return 0;
1751 RTVEC_ELT (v, i) = x;
1753 return gen_rtx_CONST_VECTOR (mode, v);
1756 /* The order of these tests is critical so that, for example, we don't
1757 check the wrong mode (input vs. output) for a conversion operation,
1758 such as FIX. At some point, this should be simplified. */
1760 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1762 REAL_VALUE_TYPE d;
1764 if (op_mode == VOIDmode)
1766 /* CONST_INTs have VOIDmode as their mode. We assume that all
1767 the bits of the constant are significant, though this is
1768 a dangerous assumption, as CONST_INTs are often
1769 created and used with garbage in the bits outside of the
1770 precision of the implied mode of the const_int. */
1771 op_mode = MAX_MODE_INT;
1774 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1776 /* Avoid the folding if flag_signaling_nans is on and
1777 operand is a signaling NaN. */
1778 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1779 return 0;
1781 d = real_value_truncate (mode, d);
1782 return const_double_from_real_value (d, mode);
1784 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1786 REAL_VALUE_TYPE d;
1788 if (op_mode == VOIDmode)
1790 /* CONST_INTs have VOIDmode as the mode. We assume that all
1791 the bits of the constant are significant, though this is a
1792 dangerous assumption: CONST_INTs are often created and used
1793 with garbage in the bits outside of the precision of the
1794 implied mode of the const_int. */
1795 op_mode = MAX_MODE_INT;
1798 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1800 /* Avoid the folding if flag_signaling_nans is on and
1801 operand is a signaling NaN. */
1802 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1803 return 0;
1805 d = real_value_truncate (mode, d);
1806 return const_double_from_real_value (d, mode);
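/* For example, given op_mode == SImode and OP == (const_int -1), FLOAT
   to DFmode reads the value as signed and folds to -1.0, while
   UNSIGNED_FLOAT reads the same 32 bits as 0xffffffff and folds to
   4294967295.0; both values are exactly representable in DFmode. */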
1809 if (CONST_SCALAR_INT_P (op) && width > 0)
1811 wide_int result;
1812 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1813 rtx_mode_t op0 = rtx_mode_t (op, imode);
1814 int int_value;
1816 #if TARGET_SUPPORTS_WIDE_INT == 0
1817 /* This assert keeps the simplification from producing a result
1818 that cannot be represented in a CONST_DOUBLE. A lot of
1819 upstream callers expect that this function never fails to
1820 simplify something, so if you added this check to the test
1821 above, the code would die later anyway. If this assert
1822 fires, you just need to make the port support wide int. */
1823 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1824 #endif
1826 switch (code)
1828 case NOT:
1829 result = wi::bit_not (op0);
1830 break;
1832 case NEG:
1833 result = wi::neg (op0);
1834 break;
1836 case ABS:
1837 result = wi::abs (op0);
1838 break;
1840 case FFS:
1841 result = wi::shwi (wi::ffs (op0), mode);
1842 break;
1844 case CLZ:
1845 if (wi::ne_p (op0, 0))
1846 int_value = wi::clz (op0);
1847 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1848 int_value = GET_MODE_PRECISION (mode);
1849 result = wi::shwi (int_value, mode);
1850 break;
1852 case CLRSB:
1853 result = wi::shwi (wi::clrsb (op0), mode);
1854 break;
1856 case CTZ:
1857 if (wi::ne_p (op0, 0))
1858 int_value = wi::ctz (op0);
1859 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1860 int_value = GET_MODE_PRECISION (mode);
1861 result = wi::shwi (int_value, mode);
1862 break;
1864 case POPCOUNT:
1865 result = wi::shwi (wi::popcount (op0), mode);
1866 break;
1868 case PARITY:
1869 result = wi::shwi (wi::parity (op0), mode);
1870 break;
1872 case BSWAP:
1873 result = wide_int (op0).bswap ();
1874 break;
1876 case TRUNCATE:
1877 case ZERO_EXTEND:
1878 result = wide_int::from (op0, width, UNSIGNED);
1879 break;
1881 case SIGN_EXTEND:
1882 result = wide_int::from (op0, width, SIGNED);
1883 break;
1885 case SQRT:
1886 default:
1887 return 0;
1890 return immed_wide_int_const (result, mode);
1893 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1894 && SCALAR_FLOAT_MODE_P (mode)
1895 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1897 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1898 switch (code)
1900 case SQRT:
1901 return 0;
1902 case ABS:
1903 d = real_value_abs (&d);
1904 break;
1905 case NEG:
1906 d = real_value_negate (&d);
1907 break;
1908 case FLOAT_TRUNCATE:
1909 /* Don't perform the operation if flag_signaling_nans is on
1910 and the operand is a signaling NaN. */
1911 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1912 return NULL_RTX;
1913 d = real_value_truncate (mode, d);
1914 break;
1915 case FLOAT_EXTEND:
1916 /* Don't perform the operation if flag_signaling_nans is on
1917 and the operand is a signaling NaN. */
1918 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1919 return NULL_RTX;
1920 /* All this does is change the mode, unless changing
1921 mode class. */
1922 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1923 real_convert (&d, mode, &d);
1924 break;
1925 case FIX:
1926 /* Don't perform the operation if flag_signaling_nans is on
1927 and the operand is a signaling NaN. */
1928 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1929 return NULL_RTX;
1930 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1931 break;
1932 case NOT:
1934 long tmp[4];
1935 int i;
1937 real_to_target (tmp, &d, GET_MODE (op));
1938 for (i = 0; i < 4; i++)
1939 tmp[i] = ~tmp[i];
1940 real_from_target (&d, tmp, mode);
1941 break;
1943 default:
1944 gcc_unreachable ();
1946 return const_double_from_real_value (d, mode);
1948 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1949 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1950 && GET_MODE_CLASS (mode) == MODE_INT
1951 && width > 0)
1953 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1954 operators are intentionally left unspecified (to ease implementation
1955 by target backends), for consistency, this routine implements the
1956 same semantics for constant folding as used by the middle-end. */
1958 /* This was formerly used only for non-IEEE float.
1959 eggert@twinsun.com says it is safe for IEEE also. */
1960 REAL_VALUE_TYPE t;
1961 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1962 wide_int wmax, wmin;
1963 /* This is part of the ABI of real_to_integer, but we check
1964 things before making this call. */
1965 bool fail;
1967 switch (code)
1969 case FIX:
1970 if (REAL_VALUE_ISNAN (*x))
1971 return const0_rtx;
1973 /* Test against the signed upper bound. */
1974 wmax = wi::max_value (width, SIGNED);
1975 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1976 if (real_less (&t, x))
1977 return immed_wide_int_const (wmax, mode);
1979 /* Test against the signed lower bound. */
1980 wmin = wi::min_value (width, SIGNED);
1981 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1982 if (real_less (x, &t))
1983 return immed_wide_int_const (wmin, mode);
1985 return immed_wide_int_const (real_to_integer (x, &fail, width),
1986 mode);
1988 case UNSIGNED_FIX:
1989 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1990 return const0_rtx;
1992 /* Test against the unsigned upper bound. */
1993 wmax = wi::max_value (width, UNSIGNED);
1994 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1995 if (real_less (&t, x))
1996 return immed_wide_int_const (wmax, mode);
1998 return immed_wide_int_const (real_to_integer (x, &fail, width),
1999 mode);
2001 default:
2002 gcc_unreachable ();
2006 return NULL_RTX;
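/* Some concrete instances of the folding above: in SImode, NOT of
   0x0000ffff gives 0xffff0000, CLZ of (const_int 1) gives 31, BSWAP of
   0x12345678 gives 0x78563412, and extending a QImode 0xff to SImode
   gives -1 for SIGN_EXTEND but 255 for ZERO_EXTEND. The FIX and
   UNSIGNED_FIX cases saturate in the same way as the middle-end:
   (fix:SI) of 3.0e9 folds to 2147483647, FIX of a NaN folds to 0, and
   UNSIGNED_FIX of any negative value folds to 0. */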
2009 /* Subroutine of simplify_binary_operation to simplify a binary operation
2010 CODE that can commute with byte swapping, with result mode MODE and
2011 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2012 Return zero if no simplification or canonicalization is possible. */
2014 static rtx
2015 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2016 rtx op0, rtx op1)
2018 rtx tem;
2020 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2021 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2023 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2024 simplify_gen_unary (BSWAP, mode, op1, mode));
2025 return simplify_gen_unary (BSWAP, mode, tem, mode);
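/* For instance, in SImode (and (bswap x) (const_int 0xff00)) becomes
   (bswap (and x (const_int 0xff0000))): if the bytes of x are
   b3 b2 b1 b0, both forms pick out b2 and leave it in byte 1 of the
   result. */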
2028 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2029 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2031 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2032 return simplify_gen_unary (BSWAP, mode, tem, mode);
2035 return NULL_RTX;
2038 /* Subroutine of simplify_binary_operation to simplify a commutative,
2039 associative binary operation CODE with result mode MODE, operating
2040 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2041 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2042 canonicalization is possible. */
2044 static rtx
2045 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2046 rtx op0, rtx op1)
2048 rtx tem;
2050 /* Linearize the operator to the left. */
2051 if (GET_CODE (op1) == code)
2053 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2054 if (GET_CODE (op0) == code)
2056 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2057 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2060 /* "a op (b op c)" becomes "(b op c) op a". */
2061 if (! swap_commutative_operands_p (op1, op0))
2062 return simplify_gen_binary (code, mode, op1, op0);
2064 std::swap (op0, op1);
2067 if (GET_CODE (op0) == code)
2069 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2070 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2072 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2073 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2076 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2077 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2078 if (tem != 0)
2079 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2081 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2082 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2083 if (tem != 0)
2084 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2087 return 0;
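/* For example, (plus (plus a b) (plus c d)) is first linearized to
   (plus (plus (plus a b) c) d), and (plus (plus x (const_int 4)) y) is
   canonicalized to (plus (plus x y) (const_int 4)), so constants bubble
   towards the outermost operation where later folds can see them. */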
2091 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2092 and OP1. Return 0 if no simplification is possible.
2094 Don't use this for relational operations such as EQ or LT.
2095 Use simplify_relational_operation instead. */
2097 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2098 rtx op0, rtx op1)
2100 rtx trueop0, trueop1;
2101 rtx tem;
2103 /* Relational operations don't work here. We must know the mode
2104 of the operands in order to do the comparison correctly.
2105 Assuming a full word can give incorrect results.
2106 Consider comparing 128 with -128 in QImode. */
2107 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2108 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2110 /* Make sure the constant is second. */
2111 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2112 && swap_commutative_operands_p (op0, op1))
2113 std::swap (op0, op1);
2115 trueop0 = avoid_constant_pool_reference (op0);
2116 trueop1 = avoid_constant_pool_reference (op1);
2118 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2119 if (tem)
2120 return tem;
2121 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2123 if (tem)
2124 return tem;
2126 /* If the above steps did not result in a simplification and op0 or op1
2127 were constant pool references, use the referenced constants directly. */
2128 if (trueop0 != op0 || trueop1 != op1)
2129 return simplify_gen_binary (code, mode, trueop0, trueop1);
2131 return NULL_RTX;
2134 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2135 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2136 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2137 actual constants. */
2139 static rtx
2140 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2141 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2143 rtx tem, reversed, opleft, opright;
2144 HOST_WIDE_INT val;
2145 unsigned int width = GET_MODE_PRECISION (mode);
2146 scalar_int_mode int_mode, inner_mode;
2148 /* Even if we can't compute a constant result,
2149 there are some cases worth simplifying. */
2151 switch (code)
2153 case PLUS:
2154 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2155 when x is NaN, infinite, or finite and nonzero. They aren't
2156 when x is -0 and the rounding mode is not towards -infinity,
2157 since (-0) + 0 is then 0. */
2158 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2159 return op0;
2161 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2162 transformations are safe even for IEEE. */
2163 if (GET_CODE (op0) == NEG)
2164 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2165 else if (GET_CODE (op1) == NEG)
2166 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2168 /* (~a) + 1 -> -a */
2169 if (INTEGRAL_MODE_P (mode)
2170 && GET_CODE (op0) == NOT
2171 && trueop1 == const1_rtx)
2172 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2174 /* Handle both-operands-constant cases. We can only add
2175 CONST_INTs to constants since the sum of relocatable symbols
2176 can't be handled by most assemblers. Don't add CONST_INT
2177 to CONST_INT since overflow won't be computed properly if wider
2178 than HOST_BITS_PER_WIDE_INT. */
2180 if ((GET_CODE (op0) == CONST
2181 || GET_CODE (op0) == SYMBOL_REF
2182 || GET_CODE (op0) == LABEL_REF)
2183 && CONST_INT_P (op1))
2184 return plus_constant (mode, op0, INTVAL (op1));
2185 else if ((GET_CODE (op1) == CONST
2186 || GET_CODE (op1) == SYMBOL_REF
2187 || GET_CODE (op1) == LABEL_REF)
2188 && CONST_INT_P (op0))
2189 return plus_constant (mode, op1, INTVAL (op0));
2191 /* See if this is something like X * C - X or vice versa or
2192 if the multiplication is written as a shift. If so, we can
2193 distribute and make a new multiply, shift, or maybe just
2194 have X (if C is 2 in the example above). But don't make
2195 something more expensive than we had before. */
2197 if (is_a <scalar_int_mode> (mode, &int_mode))
2199 rtx lhs = op0, rhs = op1;
2201 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2202 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2204 if (GET_CODE (lhs) == NEG)
2206 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2207 lhs = XEXP (lhs, 0);
2209 else if (GET_CODE (lhs) == MULT
2210 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2212 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2213 lhs = XEXP (lhs, 0);
2215 else if (GET_CODE (lhs) == ASHIFT
2216 && CONST_INT_P (XEXP (lhs, 1))
2217 && INTVAL (XEXP (lhs, 1)) >= 0
2218 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2220 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2221 GET_MODE_PRECISION (int_mode));
2222 lhs = XEXP (lhs, 0);
2225 if (GET_CODE (rhs) == NEG)
2227 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2228 rhs = XEXP (rhs, 0);
2230 else if (GET_CODE (rhs) == MULT
2231 && CONST_INT_P (XEXP (rhs, 1)))
2233 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2234 rhs = XEXP (rhs, 0);
2236 else if (GET_CODE (rhs) == ASHIFT
2237 && CONST_INT_P (XEXP (rhs, 1))
2238 && INTVAL (XEXP (rhs, 1)) >= 0
2239 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2241 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2242 GET_MODE_PRECISION (int_mode));
2243 rhs = XEXP (rhs, 0);
2246 if (rtx_equal_p (lhs, rhs))
2248 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2249 rtx coeff;
2250 bool speed = optimize_function_for_speed_p (cfun);
2252 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2254 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2255 return (set_src_cost (tem, int_mode, speed)
2256 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
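/* Two concrete cases of the distribution above: (plus (mult x
   (const_int 3)) x) has coeff0 == 3 and coeff1 == 1 and becomes
   (mult x (const_int 4)); (plus (ashift x (const_int 2)) (mult x
   (const_int 5))) has coeff0 == 4 and coeff1 == 5 and becomes
   (mult x (const_int 9)). Either result is kept only if it costs no
   more than the original expression. */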
2260 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2261 if (CONST_SCALAR_INT_P (op1)
2262 && GET_CODE (op0) == XOR
2263 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2264 && mode_signbit_p (mode, op1))
2265 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2266 simplify_gen_binary (XOR, mode, op1,
2267 XEXP (op0, 1)));
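/* The rule above is valid because adding the sign bit modulo
   2^precision only toggles the top bit, exactly like XOR with the sign
   bit. In QImode, for instance, (plus (xor x (const_int 1))
   (const_int -128)) becomes (xor x (const_int -127)), i.e. x ^ 0x81,
   since (0x01 ^ 0x80) == 0x81. */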
2269 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2270 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2271 && GET_CODE (op0) == MULT
2272 && GET_CODE (XEXP (op0, 0)) == NEG)
2274 rtx in1, in2;
2276 in1 = XEXP (XEXP (op0, 0), 0);
2277 in2 = XEXP (op0, 1);
2278 return simplify_gen_binary (MINUS, mode, op1,
2279 simplify_gen_binary (MULT, mode,
2280 in1, in2));
2283 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2284 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2285 is 1. */
2286 if (COMPARISON_P (op0)
2287 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2288 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2289 && (reversed = reversed_comparison (op0, mode)))
2290 return
2291 simplify_gen_unary (NEG, mode, reversed, mode);
2293 /* If one of the operands is a PLUS or a MINUS, see if we can
2294 simplify this by the associative law.
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2299 if (INTEGRAL_MODE_P (mode)
2300 && (plus_minus_operand_p (op0)
2301 || plus_minus_operand_p (op1))
2302 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2303 return tem;
2305 /* Reassociate floating point addition only when the user
2306 specifies associative math operations. */
2307 if (FLOAT_MODE_P (mode)
2308 && flag_associative_math)
2310 tem = simplify_associative_operation (code, mode, op0, op1);
2311 if (tem)
2312 return tem;
2314 break;
2316 case COMPARE:
2317 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2318 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2319 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2320 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2322 rtx xop00 = XEXP (op0, 0);
2323 rtx xop10 = XEXP (op1, 0);
2325 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2326 return xop00;
2328 if (REG_P (xop00) && REG_P (xop10)
2329 && REGNO (xop00) == REGNO (xop10)
2330 && GET_MODE (xop00) == mode
2331 && GET_MODE (xop10) == mode
2332 && GET_MODE_CLASS (mode) == MODE_CC)
2333 return xop00;
2335 break;
2337 case MINUS:
2338 /* We can't assume x-x is 0 even with non-IEEE floating point,
2339 but since it is zero except in very strange circumstances, we
2340 will treat it as zero with -ffinite-math-only. */
2341 if (rtx_equal_p (trueop0, trueop1)
2342 && ! side_effects_p (op0)
2343 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2344 return CONST0_RTX (mode);
2346 /* Change subtraction from zero into negation. (0 - x) is the
2347 same as -x when x is NaN, infinite, or finite and nonzero.
2348 But if the mode has signed zeros, and does not round towards
2349 -infinity, then 0 - 0 is 0, not -0. */
2350 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2351 return simplify_gen_unary (NEG, mode, op1, mode);
2353 /* (-1 - a) is ~a, unless the expression contains symbolic
2354 constants, in which case not retaining additions and
2355 subtractions could cause invalid assembly to be produced. */
2356 if (trueop0 == constm1_rtx
2357 && !contains_symbolic_reference_p (op1))
2358 return simplify_gen_unary (NOT, mode, op1, mode);
2360 /* Subtracting 0 has no effect unless the mode has signed zeros
2361 and supports rounding towards -infinity. In such a case,
2362 0 - 0 is -0. */
2363 if (!(HONOR_SIGNED_ZEROS (mode)
2364 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2365 && trueop1 == CONST0_RTX (mode))
2366 return op0;
2368 /* See if this is something like X * C - X or vice versa or
2369 if the multiplication is written as a shift. If so, we can
2370 distribute and make a new multiply, shift, or maybe just
2371 have X (if C is 2 in the example above). But don't make
2372 something more expensive than we had before. */
2374 if (is_a <scalar_int_mode> (mode, &int_mode))
2376 rtx lhs = op0, rhs = op1;
2378 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2379 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2381 if (GET_CODE (lhs) == NEG)
2383 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2384 lhs = XEXP (lhs, 0);
2386 else if (GET_CODE (lhs) == MULT
2387 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2389 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2390 lhs = XEXP (lhs, 0);
2392 else if (GET_CODE (lhs) == ASHIFT
2393 && CONST_INT_P (XEXP (lhs, 1))
2394 && INTVAL (XEXP (lhs, 1)) >= 0
2395 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2397 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2398 GET_MODE_PRECISION (int_mode));
2399 lhs = XEXP (lhs, 0);
2402 if (GET_CODE (rhs) == NEG)
2404 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2405 rhs = XEXP (rhs, 0);
2407 else if (GET_CODE (rhs) == MULT
2408 && CONST_INT_P (XEXP (rhs, 1)))
2410 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2411 rhs = XEXP (rhs, 0);
2413 else if (GET_CODE (rhs) == ASHIFT
2414 && CONST_INT_P (XEXP (rhs, 1))
2415 && INTVAL (XEXP (rhs, 1)) >= 0
2416 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2418 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2419 GET_MODE_PRECISION (int_mode));
2420 negcoeff1 = -negcoeff1;
2421 rhs = XEXP (rhs, 0);
2424 if (rtx_equal_p (lhs, rhs))
2426 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2427 rtx coeff;
2428 bool speed = optimize_function_for_speed_p (cfun);
2430 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2432 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2433 return (set_src_cost (tem, int_mode, speed)
2434 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2438 /* (a - (-b)) -> (a + b). True even for IEEE. */
2439 if (GET_CODE (op1) == NEG)
2440 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2442 /* (-x - c) may be simplified as (-c - x). */
2443 if (GET_CODE (op0) == NEG
2444 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2446 tem = simplify_unary_operation (NEG, mode, op1, mode);
2447 if (tem)
2448 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2451 /* Don't let a relocatable value get a negative coeff. */
2452 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2453 return simplify_gen_binary (PLUS, mode,
2454 op0,
2455 neg_const_int (mode, op1));
2457 /* (x - (x & y)) -> (x & ~y) */
2458 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2460 if (rtx_equal_p (op0, XEXP (op1, 0)))
2462 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2463 GET_MODE (XEXP (op1, 1)));
2464 return simplify_gen_binary (AND, mode, op0, tem);
2466 if (rtx_equal_p (op0, XEXP (op1, 1)))
2468 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2469 GET_MODE (XEXP (op1, 0)));
2470 return simplify_gen_binary (AND, mode, op0, tem);
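/* The identity holds because (x & y) only contains bits that are
   already set in x, so the subtraction never borrows. E.g. with
   x == 0b1101 and y == 0b1011: x & y == 0b1001, x - (x & y) == 0b0100,
   and x & ~y == 0b1101 & 0b0100 == 0b0100. */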
2474 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2475 by reversing the comparison code if valid. */
2476 if (STORE_FLAG_VALUE == 1
2477 && trueop0 == const1_rtx
2478 && COMPARISON_P (op1)
2479 && (reversed = reversed_comparison (op1, mode)))
2480 return reversed;
2482 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2483 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2484 && GET_CODE (op1) == MULT
2485 && GET_CODE (XEXP (op1, 0)) == NEG)
2487 rtx in1, in2;
2489 in1 = XEXP (XEXP (op1, 0), 0);
2490 in2 = XEXP (op1, 1);
2491 return simplify_gen_binary (PLUS, mode,
2492 simplify_gen_binary (MULT, mode,
2493 in1, in2),
2494 op0);
2497 /* Canonicalize (minus (neg A) (mult B C)) to
2498 (minus (mult (neg B) C) A). */
2499 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2500 && GET_CODE (op1) == MULT
2501 && GET_CODE (op0) == NEG)
2503 rtx in1, in2;
2505 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2506 in2 = XEXP (op1, 1);
2507 return simplify_gen_binary (MINUS, mode,
2508 simplify_gen_binary (MULT, mode,
2509 in1, in2),
2510 XEXP (op0, 0));
2513 /* If one of the operands is a PLUS or a MINUS, see if we can
2514 simplify this by the associative law. This will, for example,
2515 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2516 Don't use the associative law for floating point.
2517 The inaccuracy makes it nonassociative,
2518 and subtle programs can break if operations are associated. */
2520 if (INTEGRAL_MODE_P (mode)
2521 && (plus_minus_operand_p (op0)
2522 || plus_minus_operand_p (op1))
2523 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2524 return tem;
2525 break;
2527 case MULT:
2528 if (trueop1 == constm1_rtx)
2529 return simplify_gen_unary (NEG, mode, op0, mode);
2531 if (GET_CODE (op0) == NEG)
2533 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2534 /* If op1 is a MULT as well and simplify_unary_operation
2535 just moved the NEG to the second operand, simplify_gen_binary
2536 below could, through simplify_associative_operation, move
2537 the NEG around again and recurse endlessly. */
2538 if (temp
2539 && GET_CODE (op1) == MULT
2540 && GET_CODE (temp) == MULT
2541 && XEXP (op1, 0) == XEXP (temp, 0)
2542 && GET_CODE (XEXP (temp, 1)) == NEG
2543 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2544 temp = NULL_RTX;
2545 if (temp)
2546 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2548 if (GET_CODE (op1) == NEG)
2550 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2551 /* If op0 is a MULT as well and simplify_unary_operation
2552 just moved the NEG to the second operand, simplify_gen_binary
2553 below could, through simplify_associative_operation, move
2554 the NEG around again and recurse endlessly. */
2555 if (temp
2556 && GET_CODE (op0) == MULT
2557 && GET_CODE (temp) == MULT
2558 && XEXP (op0, 0) == XEXP (temp, 0)
2559 && GET_CODE (XEXP (temp, 1)) == NEG
2560 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2561 temp = NULL_RTX;
2562 if (temp)
2563 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2566 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2567 x is NaN, since x * 0 is then also NaN. Nor is it valid
2568 when the mode has signed zeros, since multiplying a negative
2569 number by 0 will give -0, not 0. */
2570 if (!HONOR_NANS (mode)
2571 && !HONOR_SIGNED_ZEROS (mode)
2572 && trueop1 == CONST0_RTX (mode)
2573 && ! side_effects_p (op0))
2574 return op1;
2576 /* In IEEE floating point, x*1 is not equivalent to x for
2577 signalling NaNs. */
2578 if (!HONOR_SNANS (mode)
2579 && trueop1 == CONST1_RTX (mode))
2580 return op0;
2582 /* Convert multiply by constant power of two into shift. */
2583 if (CONST_SCALAR_INT_P (trueop1))
2585 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2586 if (val >= 0)
2587 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2590 /* x*2 is x+x and x*(-1) is -x */
2591 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2592 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2593 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2594 && GET_MODE (op0) == mode)
2596 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2598 if (real_equal (d1, &dconst2))
2599 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2601 if (!HONOR_SNANS (mode)
2602 && real_equal (d1, &dconstm1))
2603 return simplify_gen_unary (NEG, mode, op0, mode);
2606 /* Optimize -x * -x as x * x. */
2607 if (FLOAT_MODE_P (mode)
2608 && GET_CODE (op0) == NEG
2609 && GET_CODE (op1) == NEG
2610 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2611 && !side_effects_p (XEXP (op0, 0)))
2612 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2614 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2615 if (SCALAR_FLOAT_MODE_P (mode)
2616 && GET_CODE (op0) == ABS
2617 && GET_CODE (op1) == ABS
2618 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2619 && !side_effects_p (XEXP (op0, 0)))
2620 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2622 /* Reassociate multiplication, but for floating point MULTs
2623 only when the user specifies unsafe math optimizations. */
2624 if (! FLOAT_MODE_P (mode)
2625 || flag_unsafe_math_optimizations)
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2631 break;
2633 case IOR:
2634 if (trueop1 == CONST0_RTX (mode))
2635 return op0;
2636 if (INTEGRAL_MODE_P (mode)
2637 && trueop1 == CONSTM1_RTX (mode)
2638 && !side_effects_p (op0))
2639 return op1;
2640 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2641 return op0;
2642 /* A | (~A) -> -1 */
2643 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2644 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2645 && ! side_effects_p (op0)
2646 && SCALAR_INT_MODE_P (mode))
2647 return constm1_rtx;
2649 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2650 if (CONST_INT_P (op1)
2651 && HWI_COMPUTABLE_MODE_P (mode)
2652 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2653 && !side_effects_p (op0))
2654 return op1;
2656 /* Canonicalize (X & C1) | C2. */
2657 if (GET_CODE (op0) == AND
2658 && CONST_INT_P (trueop1)
2659 && CONST_INT_P (XEXP (op0, 1)))
2661 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2662 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2663 HOST_WIDE_INT c2 = INTVAL (trueop1);
2665 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2666 if ((c1 & c2) == c1
2667 && !side_effects_p (XEXP (op0, 0)))
2668 return trueop1;
2670 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2671 if (((c1|c2) & mask) == mask)
2672 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2674 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2675 if (((c1 & ~c2) & mask) != (c1 & mask))
2677 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2678 gen_int_mode (c1 & ~c2, mode));
2679 return simplify_gen_binary (IOR, mode, tem, op1);
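/* A worked instance of the last rule: (ior (and x (const_int 0xff))
   (const_int 0x0f)) becomes (ior (and x (const_int 0xf0))
   (const_int 0x0f)), because the low four bits are forced to 1 by the
   IOR regardless of the AND, so C1 can shrink to C1 & ~C2. */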
2683 /* Convert (A & B) | A to A. */
2684 if (GET_CODE (op0) == AND
2685 && (rtx_equal_p (XEXP (op0, 0), op1)
2686 || rtx_equal_p (XEXP (op0, 1), op1))
2687 && ! side_effects_p (XEXP (op0, 0))
2688 && ! side_effects_p (XEXP (op0, 1)))
2689 return op1;
2691 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2692 mode size to (rotate A CX). */
2694 if (GET_CODE (op1) == ASHIFT
2695 || GET_CODE (op1) == SUBREG)
2697 opleft = op1;
2698 opright = op0;
2700 else
2702 opright = op1;
2703 opleft = op0;
2706 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2707 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2708 && CONST_INT_P (XEXP (opleft, 1))
2709 && CONST_INT_P (XEXP (opright, 1))
2710 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2711 == GET_MODE_PRECISION (mode)))
2712 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
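/* For example, in SImode (ior (ashift x (const_int 24)) (lshiftrt x
   (const_int 8))) is recognized as (rotate x (const_int 24)), since
   24 + 8 equals the 32-bit precision of the mode. */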
2714 /* Same, but for ashift that has been "simplified" to a wider mode
2715 by simplify_shift_const. */
2717 if (GET_CODE (opleft) == SUBREG
2718 && is_a <scalar_int_mode> (mode, &int_mode)
2719 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2720 &inner_mode)
2721 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2722 && GET_CODE (opright) == LSHIFTRT
2723 && GET_CODE (XEXP (opright, 0)) == SUBREG
2724 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2725 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2726 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2727 SUBREG_REG (XEXP (opright, 0)))
2728 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2729 && CONST_INT_P (XEXP (opright, 1))
2730 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2731 + INTVAL (XEXP (opright, 1))
2732 == GET_MODE_PRECISION (int_mode)))
2733 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2734 XEXP (SUBREG_REG (opleft), 1));
2736 /* If we have (ior (and X C1) C2), simplify this by making
2737 C1 as small as possible if C1 actually changes. */
2738 if (CONST_INT_P (op1)
2739 && (HWI_COMPUTABLE_MODE_P (mode)
2740 || INTVAL (op1) > 0)
2741 && GET_CODE (op0) == AND
2742 && CONST_INT_P (XEXP (op0, 1))
2743 && CONST_INT_P (op1)
2744 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2746 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2747 gen_int_mode (UINTVAL (XEXP (op0, 1))
2748 & ~UINTVAL (op1),
2749 mode));
2750 return simplify_gen_binary (IOR, mode, tmp, op1);
2753 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2754 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2755 the PLUS does not affect any of the bits in OP1: then we can do
2756 the IOR as a PLUS and we can associate. This is valid if OP1
2757 can be safely shifted left C bits. */
2758 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2759 && GET_CODE (XEXP (op0, 0)) == PLUS
2760 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2761 && CONST_INT_P (XEXP (op0, 1))
2762 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2764 int count = INTVAL (XEXP (op0, 1));
2765 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2767 if (mask >> count == INTVAL (trueop1)
2768 && trunc_int_for_mode (mask, mode) == mask
2769 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2770 return simplify_gen_binary (ASHIFTRT, mode,
2771 plus_constant (mode, XEXP (op0, 0),
2772 mask),
2773 XEXP (op0, 1));
2776 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2777 if (tem)
2778 return tem;
2780 tem = simplify_associative_operation (code, mode, op0, op1);
2781 if (tem)
2782 return tem;
2783 break;
2785 case XOR:
2786 if (trueop1 == CONST0_RTX (mode))
2787 return op0;
2788 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2789 return simplify_gen_unary (NOT, mode, op0, mode);
2790 if (rtx_equal_p (trueop0, trueop1)
2791 && ! side_effects_p (op0)
2792 && GET_MODE_CLASS (mode) != MODE_CC)
2793 return CONST0_RTX (mode);
2795 /* Canonicalize XOR of the most significant bit to PLUS. */
2796 if (CONST_SCALAR_INT_P (op1)
2797 && mode_signbit_p (mode, op1))
2798 return simplify_gen_binary (PLUS, mode, op0, op1);
2799 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2800 if (CONST_SCALAR_INT_P (op1)
2801 && GET_CODE (op0) == PLUS
2802 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2803 && mode_signbit_p (mode, XEXP (op0, 1)))
2804 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2805 simplify_gen_binary (XOR, mode, op1,
2806 XEXP (op0, 1)));
2808 /* If we are XORing two things that have no bits in common,
2809 convert them into an IOR. This helps to detect rotation encoded
2810 using those methods and possibly other simplifications. */
2812 if (HWI_COMPUTABLE_MODE_P (mode)
2813 && (nonzero_bits (op0, mode)
2814 & nonzero_bits (op1, mode)) == 0)
2815 return (simplify_gen_binary (IOR, mode, op0, op1));
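/* For instance, if nonzero_bits shows op0 can only set bits within
   0x0f and op1 only bits within 0xf0, then no bit can be 1 in both
   operands, and x ^ y == x | y whenever (x & y) == 0; e.g.
   0x05 ^ 0x30 == 0x35 == 0x05 | 0x30. */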
2817 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2818 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2819 (NOT y). */
2821 int num_negated = 0;
2823 if (GET_CODE (op0) == NOT)
2824 num_negated++, op0 = XEXP (op0, 0);
2825 if (GET_CODE (op1) == NOT)
2826 num_negated++, op1 = XEXP (op1, 0);
2828 if (num_negated == 2)
2829 return simplify_gen_binary (XOR, mode, op0, op1);
2830 else if (num_negated == 1)
2831 return simplify_gen_unary (NOT, mode,
2832 simplify_gen_binary (XOR, mode, op0, op1),
2833 mode);
2836 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2837 correspond to a machine insn or result in further simplifications
2838 if B is a constant. */
2840 if (GET_CODE (op0) == AND
2841 && rtx_equal_p (XEXP (op0, 1), op1)
2842 && ! side_effects_p (op1))
2843 return simplify_gen_binary (AND, mode,
2844 simplify_gen_unary (NOT, mode,
2845 XEXP (op0, 0), mode),
2846 op1);
2848 else if (GET_CODE (op0) == AND
2849 && rtx_equal_p (XEXP (op0, 0), op1)
2850 && ! side_effects_p (op1))
2851 return simplify_gen_binary (AND, mode,
2852 simplify_gen_unary (NOT, mode,
2853 XEXP (op0, 1), mode),
2854 op1);
2856 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2857 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2858 out bits inverted twice and not set by C. Similarly, given
2859 (xor (and (xor A B) C) D), simplify without inverting C in
2860 the xor operand: (xor (and A C) (B&C)^D).
2862 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2863 && GET_CODE (XEXP (op0, 0)) == XOR
2864 && CONST_INT_P (op1)
2865 && CONST_INT_P (XEXP (op0, 1))
2866 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2868 enum rtx_code op = GET_CODE (op0);
2869 rtx a = XEXP (XEXP (op0, 0), 0);
2870 rtx b = XEXP (XEXP (op0, 0), 1);
2871 rtx c = XEXP (op0, 1);
2872 rtx d = op1;
2873 HOST_WIDE_INT bval = INTVAL (b);
2874 HOST_WIDE_INT cval = INTVAL (c);
2875 HOST_WIDE_INT dval = INTVAL (d);
2876 HOST_WIDE_INT xcval;
2878 if (op == IOR)
2879 xcval = ~cval;
2880 else
2881 xcval = cval;
2883 return simplify_gen_binary (XOR, mode,
2884 simplify_gen_binary (op, mode, a, c),
2885 gen_int_mode ((bval & xcval) ^ dval,
2886 mode));
2889 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2890 we can transform like this:
2891 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2892 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2893 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2894 Attempt a few simplifications when B and C are both constants. */
2895 if (GET_CODE (op0) == AND
2896 && CONST_INT_P (op1)
2897 && CONST_INT_P (XEXP (op0, 1)))
2899 rtx a = XEXP (op0, 0);
2900 rtx b = XEXP (op0, 1);
2901 rtx c = op1;
2902 HOST_WIDE_INT bval = INTVAL (b);
2903 HOST_WIDE_INT cval = INTVAL (c);
2905 /* Instead of computing ~A&C, we compute its bitwise negation
2906 A|~C (so that ~A&C == ~(A|~C)). If that yields -1, ~A&C is
2907 zero, so we can optimize for sure. If it does not simplify,
2908 we still try to compute ~A&C below, but since that always
2909 allocates RTL, we don't try that before committing to
2910 returning a simplified expression. */
2911 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2912 GEN_INT (~cval));
2914 if ((~cval & bval) == 0)
2916 rtx na_c = NULL_RTX;
2917 if (n_na_c)
2918 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2919 else
2921 /* If ~A does not simplify, don't bother: we don't
2922 want to simplify 2 operations into 3, and if na_c
2923 were to simplify with na, n_na_c would have
2924 simplified as well. */
2925 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2926 if (na)
2927 na_c = simplify_gen_binary (AND, mode, na, c);
2930 /* Try to simplify ~A&C | ~B&C. */
2931 if (na_c != NULL_RTX)
2932 return simplify_gen_binary (IOR, mode, na_c,
2933 gen_int_mode (~bval & cval, mode));
2935 else
2937 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2938 if (n_na_c == CONSTM1_RTX (mode))
2940 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2941 gen_int_mode (~cval & bval,
2942 mode));
2943 return simplify_gen_binary (IOR, mode, a_nc_b,
2944 gen_int_mode (~bval & cval,
2945 mode));
2950 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2951 do (ior (and A ~C) (and B C)), which is a machine instruction on some
2952 machines and also has a shorter instruction path length. */
2953 if (GET_CODE (op0) == AND
2954 && GET_CODE (XEXP (op0, 0)) == XOR
2955 && CONST_INT_P (XEXP (op0, 1))
2956 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2958 rtx a = trueop1;
2959 rtx b = XEXP (XEXP (op0, 0), 1);
2960 rtx c = XEXP (op0, 1);
2961 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2962 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2963 rtx bc = simplify_gen_binary (AND, mode, b, c);
2964 return simplify_gen_binary (IOR, mode, a_nc, bc);
2966 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2967 else if (GET_CODE (op0) == AND
2968 && GET_CODE (XEXP (op0, 0)) == XOR
2969 && CONST_INT_P (XEXP (op0, 1))
2970 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2972 rtx a = XEXP (XEXP (op0, 0), 0);
2973 rtx b = trueop1;
2974 rtx c = XEXP (op0, 1);
2975 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2976 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2977 rtx ac = simplify_gen_binary (AND, mode, a, c);
2978 return simplify_gen_binary (IOR, mode, ac, b_nc);
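/* A per-bit check of the first rewrite: where a bit of C is 1,
   ((A ^ B) & C) ^ A reduces to B; where it is 0, it reduces to A, which
   is exactly (ior (and A (not C)) (and B C)). E.g. with A == 0b1100,
   B == 0b1010, C == 0b0110: ((A ^ B) & C) ^ A == 0b1010 and
   (A & ~C) | (B & C) == 0b1000 | 0b0010 == 0b1010. The second rewrite
   follows in the same way with the roles of A and B exchanged. */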
2981 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2982 comparison if STORE_FLAG_VALUE is 1. */
2983 if (STORE_FLAG_VALUE == 1
2984 && trueop1 == const1_rtx
2985 && COMPARISON_P (op0)
2986 && (reversed = reversed_comparison (op0, mode)))
2987 return reversed;
2989 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2990 is (lt foo (const_int 0)), so we can perform the above
2991 simplification if STORE_FLAG_VALUE is 1. */
2993 if (is_a <scalar_int_mode> (mode, &int_mode)
2994 && STORE_FLAG_VALUE == 1
2995 && trueop1 == const1_rtx
2996 && GET_CODE (op0) == LSHIFTRT
2997 && CONST_INT_P (XEXP (op0, 1))
2998 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
2999 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3001 /* (xor (comparison foo bar) (const_int sign-bit))
3002 when STORE_FLAG_VALUE is the sign bit. */
3003 if (is_a <scalar_int_mode> (mode, &int_mode)
3004 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3005 && trueop1 == const_true_rtx
3006 && COMPARISON_P (op0)
3007 && (reversed = reversed_comparison (op0, int_mode)))
3008 return reversed;
3010 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3011 if (tem)
3012 return tem;
3014 tem = simplify_associative_operation (code, mode, op0, op1);
3015 if (tem)
3016 return tem;
3017 break;
3019 case AND:
3020 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3021 return trueop1;
3022 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3023 return op0;
3024 if (HWI_COMPUTABLE_MODE_P (mode))
3026 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3027 HOST_WIDE_INT nzop1;
3028 if (CONST_INT_P (trueop1))
3030 HOST_WIDE_INT val1 = INTVAL (trueop1);
3031 /* If we are turning off bits already known off in OP0, we need
3032 not do an AND. */
3033 if ((nzop0 & ~val1) == 0)
3034 return op0;
3036 nzop1 = nonzero_bits (trueop1, mode);
3037 /* If we are clearing all the nonzero bits, the result is zero. */
3038 if ((nzop1 & nzop0) == 0
3039 && !side_effects_p (op0) && !side_effects_p (op1))
3040 return CONST0_RTX (mode);
3042 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3043 && GET_MODE_CLASS (mode) != MODE_CC)
3044 return op0;
3045 /* A & (~A) -> 0 */
3046 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3047 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3048 && ! side_effects_p (op0)
3049 && GET_MODE_CLASS (mode) != MODE_CC)
3050 return CONST0_RTX (mode);
3052 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3053 there are no nonzero bits of C outside of X's mode. */
3054 if ((GET_CODE (op0) == SIGN_EXTEND
3055 || GET_CODE (op0) == ZERO_EXTEND)
3056 && CONST_INT_P (trueop1)
3057 && HWI_COMPUTABLE_MODE_P (mode)
3058 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3059 & UINTVAL (trueop1)) == 0)
3061 machine_mode imode = GET_MODE (XEXP (op0, 0));
3062 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3063 gen_int_mode (INTVAL (trueop1),
3064 imode));
3065 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3068 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3069 we might be able to further simplify the AND with X and potentially
3070 remove the truncation altogether. */
3071 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3073 rtx x = XEXP (op0, 0);
3074 machine_mode xmode = GET_MODE (x);
3075 tem = simplify_gen_binary (AND, xmode, x,
3076 gen_int_mode (INTVAL (trueop1), xmode));
3077 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3080 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3081 if (GET_CODE (op0) == IOR
3082 && CONST_INT_P (trueop1)
3083 && CONST_INT_P (XEXP (op0, 1)))
3085 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3086 return simplify_gen_binary (IOR, mode,
3087 simplify_gen_binary (AND, mode,
3088 XEXP (op0, 0), op1),
3089 gen_int_mode (tmp, mode));
3092 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3093 insn (and may simplify more). */
3094 if (GET_CODE (op0) == XOR
3095 && rtx_equal_p (XEXP (op0, 0), op1)
3096 && ! side_effects_p (op1))
3097 return simplify_gen_binary (AND, mode,
3098 simplify_gen_unary (NOT, mode,
3099 XEXP (op0, 1), mode),
3100 op1);
3102 if (GET_CODE (op0) == XOR
3103 && rtx_equal_p (XEXP (op0, 1), op1)
3104 && ! side_effects_p (op1))
3105 return simplify_gen_binary (AND, mode,
3106 simplify_gen_unary (NOT, mode,
3107 XEXP (op0, 0), mode),
3108 op1);
3110 /* Similarly for (~(A ^ B)) & A. */
3111 if (GET_CODE (op0) == NOT
3112 && GET_CODE (XEXP (op0, 0)) == XOR
3113 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3114 && ! side_effects_p (op1))
3115 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3117 if (GET_CODE (op0) == NOT
3118 && GET_CODE (XEXP (op0, 0)) == XOR
3119 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3120 && ! side_effects_p (op1))
3121 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3123 /* Convert (A | B) & A to A. */
3124 if (GET_CODE (op0) == IOR
3125 && (rtx_equal_p (XEXP (op0, 0), op1)
3126 || rtx_equal_p (XEXP (op0, 1), op1))
3127 && ! side_effects_p (XEXP (op0, 0))
3128 && ! side_effects_p (XEXP (op0, 1)))
3129 return op1;
3131 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3132 ((A & N) + B) & M -> (A + B) & M
3133 Similarly if (N & M) == 0,
3134 ((A | N) + B) & M -> (A + B) & M
3135 and for - instead of + and/or ^ instead of |.
3136 Also, if (N & M) == 0, then
3137 (A +- N) & M -> A & M. */
3138 if (CONST_INT_P (trueop1)
3139 && HWI_COMPUTABLE_MODE_P (mode)
3140 && ~UINTVAL (trueop1)
3141 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3142 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3144 rtx pmop[2];
3145 int which;
3147 pmop[0] = XEXP (op0, 0);
3148 pmop[1] = XEXP (op0, 1);
3150 if (CONST_INT_P (pmop[1])
3151 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3152 return simplify_gen_binary (AND, mode, pmop[0], op1);
3154 for (which = 0; which < 2; which++)
3156 tem = pmop[which];
3157 switch (GET_CODE (tem))
3159 case AND:
3160 if (CONST_INT_P (XEXP (tem, 1))
3161 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3162 == UINTVAL (trueop1))
3163 pmop[which] = XEXP (tem, 0);
3164 break;
3165 case IOR:
3166 case XOR:
3167 if (CONST_INT_P (XEXP (tem, 1))
3168 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3169 pmop[which] = XEXP (tem, 0);
3170 break;
3171 default:
3172 break;
3176 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3178 tem = simplify_gen_binary (GET_CODE (op0), mode,
3179 pmop[0], pmop[1]);
3180 return simplify_gen_binary (code, mode, tem, op1);
3184 /* (and X (ior (not X) Y)) -> (and X Y) */
3185 if (GET_CODE (op1) == IOR
3186 && GET_CODE (XEXP (op1, 0)) == NOT
3187 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3188 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3190 /* (and (ior (not X) Y) X) -> (and X Y) */
3191 if (GET_CODE (op0) == IOR
3192 && GET_CODE (XEXP (op0, 0)) == NOT
3193 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3194 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3196 /* (and X (ior Y (not X))) -> (and X Y) */
3197 if (GET_CODE (op1) == IOR
3198 && GET_CODE (XEXP (op1, 1)) == NOT
3199 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3200 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3202 /* (and (ior Y (not X)) X) -> (and X Y) */
3203 if (GET_CODE (op0) == IOR
3204 && GET_CODE (XEXP (op0, 1)) == NOT
3205 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3206 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3208 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3209 if (tem)
3210 return tem;
3212 tem = simplify_associative_operation (code, mode, op0, op1);
3213 if (tem)
3214 return tem;
3215 break;
3217 case UDIV:
3218 /* 0/x is 0 (or x&0 if x has side-effects). */
3219 if (trueop0 == CONST0_RTX (mode)
3220 && !cfun->can_throw_non_call_exceptions)
3222 if (side_effects_p (op1))
3223 return simplify_gen_binary (AND, mode, op1, trueop0);
3224 return trueop0;
3226 /* x/1 is x. */
3227 if (trueop1 == CONST1_RTX (mode))
3229 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3230 if (tem)
3231 return tem;
3233 /* Convert divide by power of two into shift. */
3234 if (CONST_INT_P (trueop1)
3235 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3236 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
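/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)),
   using the unsigned identity x / 2^k == x >> k. */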
3237 break;
3239 case DIV:
3240 /* Handle floating point and integers separately. */
3241 if (SCALAR_FLOAT_MODE_P (mode))
3243 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3244 safe for modes with NaNs, since 0.0 / 0.0 will then be
3245 NaN rather than 0.0. Nor is it safe for modes with signed
3246 zeros, since dividing 0 by a negative number gives -0.0 */
3247 if (trueop0 == CONST0_RTX (mode)
3248 && !HONOR_NANS (mode)
3249 && !HONOR_SIGNED_ZEROS (mode)
3250 && ! side_effects_p (op1))
3251 return op0;
3252 /* x/1.0 is x. */
3253 if (trueop1 == CONST1_RTX (mode)
3254 && !HONOR_SNANS (mode))
3255 return op0;
3257 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3258 && trueop1 != CONST0_RTX (mode))
3260 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3262 /* x/-1.0 is -x. */
3263 if (real_equal (d1, &dconstm1)
3264 && !HONOR_SNANS (mode))
3265 return simplify_gen_unary (NEG, mode, op0, mode);
3267 /* Change FP division by a constant into multiplication.
3268 Only do this with -freciprocal-math. */
3269 if (flag_reciprocal_math
3270 && !real_equal (d1, &dconst0))
3272 REAL_VALUE_TYPE d;
3273 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3274 tem = const_double_from_real_value (d, mode);
3275 return simplify_gen_binary (MULT, mode, op0, tem);
3279 else if (SCALAR_INT_MODE_P (mode))
3281 /* 0/x is 0 (or x&0 if x has side-effects). */
3282 if (trueop0 == CONST0_RTX (mode)
3283 && !cfun->can_throw_non_call_exceptions)
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3289 /* x/1 is x. */
3290 if (trueop1 == CONST1_RTX (mode))
3292 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3293 if (tem)
3294 return tem;
3296 /* x/-1 is -x. */
3297 if (trueop1 == constm1_rtx)
3299 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3300 if (x)
3301 return simplify_gen_unary (NEG, mode, x, mode);
3304 break;
3306 case UMOD:
3307 /* 0%x is 0 (or x&0 if x has side-effects). */
3308 if (trueop0 == CONST0_RTX (mode))
3310 if (side_effects_p (op1))
3311 return simplify_gen_binary (AND, mode, op1, trueop0);
3312 return trueop0;
3314 /* x%1 is 0 (or x&0 if x has side-effects). */
3315 if (trueop1 == CONST1_RTX (mode))
3317 if (side_effects_p (op0))
3318 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3319 return CONST0_RTX (mode);
3321 /* Implement modulus by power of two as AND. */
3322 if (CONST_INT_P (trueop1)
3323 && exact_log2 (UINTVAL (trueop1)) > 0)
3324 return simplify_gen_binary (AND, mode, op0,
3325 gen_int_mode (INTVAL (op1) - 1, mode));
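/* This relies on the unsigned identity x % 2^k == x & (2^k - 1); e.g.
   (umod x (const_int 8)) becomes (and x (const_int 7)). */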
3326 break;
3328 case MOD:
3329 /* 0%x is 0 (or x&0 if x has side-effects). */
3330 if (trueop0 == CONST0_RTX (mode))
3332 if (side_effects_p (op1))
3333 return simplify_gen_binary (AND, mode, op1, trueop0);
3334 return trueop0;
3336 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3337 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3339 if (side_effects_p (op0))
3340 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3341 return CONST0_RTX (mode);
3343 break;
3345 case ROTATERT:
3346 case ROTATE:
3347 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3348 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3349 bitsize - 1, use the other direction of rotate with an amount of
3350 1 .. bitsize / 2 - 1 instead. */
3351 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3352 if (CONST_INT_P (trueop1)
3353 && IN_RANGE (INTVAL (trueop1),
3354 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3355 GET_MODE_PRECISION (mode) - 1))
3356 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3357 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3358 - INTVAL (trueop1)));
3359 #endif
3360 /* FALLTHRU */
3361 case ASHIFTRT:
3362 if (trueop1 == CONST0_RTX (mode))
3363 return op0;
3364 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3365 return op0;
3366 /* Rotating ~0 always results in ~0. */
3367 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3368 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3369 && ! side_effects_p (op1))
3370 return op0;
3372 canonicalize_shift:
3373 /* Given:
3374 scalar modes M1, M2
3375 scalar constants c1, c2
3376 size (M2) > size (M1)
3377 c1 == size (M2) - size (M1)
3378 optimize:
3379 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3380 <low_part>)
3381 (const_int <c2>))
3383 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3384 <low_part>). */
3385 if ((code == ASHIFTRT || code == LSHIFTRT)
3386 && is_a <scalar_int_mode> (mode, &int_mode)
3387 && SUBREG_P (op0)
3388 && CONST_INT_P (op1)
3389 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3390 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3391 &inner_mode)
3392 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3393 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3394 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3395 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3396 && subreg_lowpart_p (op0))
3398 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3399 + INTVAL (op1));
3400 tmp = simplify_gen_binary (code, inner_mode,
3401 XEXP (SUBREG_REG (op0), 0),
3402 tmp);
3403 return lowpart_subreg (int_mode, tmp, inner_mode);
3406 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3408 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3409 if (val != INTVAL (op1))
3410 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3412 break;
3414 case ASHIFT:
3415 case SS_ASHIFT:
3416 case US_ASHIFT:
3417 if (trueop1 == CONST0_RTX (mode))
3418 return op0;
3419 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3420 return op0;
3421 goto canonicalize_shift;
3423 case LSHIFTRT:
3424 if (trueop1 == CONST0_RTX (mode))
3425 return op0;
3426 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3427 return op0;
3428 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3429 if (GET_CODE (op0) == CLZ
3430 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3431 && CONST_INT_P (trueop1)
3432 && STORE_FLAG_VALUE == 1
3433 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3435 unsigned HOST_WIDE_INT zero_val = 0;
3437 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3438 && zero_val == GET_MODE_PRECISION (inner_mode)
3439 && INTVAL (trueop1) == exact_log2 (zero_val))
3440 return simplify_gen_relational (EQ, mode, inner_mode,
3441 XEXP (op0, 0), const0_rtx);
3443 goto canonicalize_shift;
3445 case SMIN:
3446 if (width <= HOST_BITS_PER_WIDE_INT
3447 && mode_signbit_p (mode, trueop1)
3448 && ! side_effects_p (op0))
3449 return op1;
3450 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3451 return op0;
3452 tem = simplify_associative_operation (code, mode, op0, op1);
3453 if (tem)
3454 return tem;
3455 break;
3457 case SMAX:
3458 if (width <= HOST_BITS_PER_WIDE_INT
3459 && CONST_INT_P (trueop1)
3460 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3461 && ! side_effects_p (op0))
3462 return op1;
3463 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3464 return op0;
3465 tem = simplify_associative_operation (code, mode, op0, op1);
3466 if (tem)
3467 return tem;
3468 break;
3470 case UMIN:
3471 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3472 return op1;
3473 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3474 return op0;
3475 tem = simplify_associative_operation (code, mode, op0, op1);
3476 if (tem)
3477 return tem;
3478 break;
3480 case UMAX:
3481 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3482 return op1;
3483 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3484 return op0;
3485 tem = simplify_associative_operation (code, mode, op0, op1);
3486 if (tem)
3487 return tem;
3488 break;
3490 case SS_PLUS:
3491 case US_PLUS:
3492 case SS_MINUS:
3493 case US_MINUS:
3494 case SS_MULT:
3495 case US_MULT:
3496 case SS_DIV:
3497 case US_DIV:
3498 /* ??? There are simplifications that can be done. */
3499 return 0;
3501 case VEC_SELECT:
3502 if (!VECTOR_MODE_P (mode))
3504 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3505 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3506 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3507 gcc_assert (XVECLEN (trueop1, 0) == 1);
3508 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3510 if (GET_CODE (trueop0) == CONST_VECTOR)
3511 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3512 (trueop1, 0, 0)));
3514 /* Extract a scalar element from a nested VEC_SELECT expression
3515 (with an optional nested VEC_CONCAT expression). Some targets
3516 (i386) extract a scalar element from a vector using a chain of
3517 nested VEC_SELECT expressions. When the input operand is a memory
3518 operand, this operation can be simplified to a simple scalar
3519 load from an offset memory address. */
3520 if (GET_CODE (trueop0) == VEC_SELECT)
3522 rtx op0 = XEXP (trueop0, 0);
3523 rtx op1 = XEXP (trueop0, 1);
3525 machine_mode opmode = GET_MODE (op0);
3526 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3527 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3529 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3530 int elem;
3532 rtvec vec;
3533 rtx tmp_op, tmp;
3535 gcc_assert (GET_CODE (op1) == PARALLEL);
3536 gcc_assert (i < n_elts);
3538 /* Select the element pointed to by the nested selector. */
3539 elem = INTVAL (XVECEXP (op1, 0, i));
3541 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3542 if (GET_CODE (op0) == VEC_CONCAT)
3544 rtx op00 = XEXP (op0, 0);
3545 rtx op01 = XEXP (op0, 1);
3547 machine_mode mode00, mode01;
3548 int n_elts00, n_elts01;
3550 mode00 = GET_MODE (op00);
3551 mode01 = GET_MODE (op01);
3553 /* Find out number of elements of each operand. */
3554 if (VECTOR_MODE_P (mode00))
3556 elt_size = GET_MODE_UNIT_SIZE (mode00);
3557 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3559 else
3560 n_elts00 = 1;
3562 if (VECTOR_MODE_P (mode01))
3564 elt_size = GET_MODE_UNIT_SIZE (mode01);
3565 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3567 else
3568 n_elts01 = 1;
3570 gcc_assert (n_elts == n_elts00 + n_elts01);
3572 /* Select correct operand of VEC_CONCAT
3573 and adjust selector. */
3574 if (elem < n_elts01)
3575 tmp_op = op00;
3576 else
3578 tmp_op = op01;
3579 elem -= n_elts00;
3582 else
3583 tmp_op = op0;
3585 vec = rtvec_alloc (1);
3586 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3588 tmp = gen_rtx_fmt_ee (code, mode,
3589 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3590 return tmp;
3592 if (GET_CODE (trueop0) == VEC_DUPLICATE
3593 && GET_MODE (XEXP (trueop0, 0)) == mode)
3594 return XEXP (trueop0, 0);
3596 else
3598 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3599 gcc_assert (GET_MODE_INNER (mode)
3600 == GET_MODE_INNER (GET_MODE (trueop0)));
3601 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3603 if (GET_CODE (trueop0) == CONST_VECTOR)
3605 int elt_size = GET_MODE_UNIT_SIZE (mode);
3606 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3607 rtvec v = rtvec_alloc (n_elts);
3608 unsigned int i;
3610 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3611 for (i = 0; i < n_elts; i++)
3613 rtx x = XVECEXP (trueop1, 0, i);
3615 gcc_assert (CONST_INT_P (x));
3616 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3617 INTVAL (x));
3620 return gen_rtx_CONST_VECTOR (mode, v);
3623 /* Recognize the identity. */
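/* For example, assuming V4SImode:
     (vec_select:V4SI x (parallel [0 1 2 3]))
   selects every element of X in order and is therefore just X. */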
3624 if (GET_MODE (trueop0) == mode)
3626 bool maybe_ident = true;
3627 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3629 rtx j = XVECEXP (trueop1, 0, i);
3630 if (!CONST_INT_P (j) || INTVAL (j) != i)
3632 maybe_ident = false;
3633 break;
3636 if (maybe_ident)
3637 return trueop0;
3640 /* If we build {a,b} then permute it, build the result directly. */
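/* Illustrative example, assuming V2DImode operands A, B, C and D:
     (vec_select:V2DI
       (vec_concat:V4DI (vec_concat:V2DI a b) (vec_concat:V2DI c d))
       (parallel [3 1]))
   picks D and B directly, giving (vec_concat:V2DI d b). */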
3641 if (XVECLEN (trueop1, 0) == 2
3642 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3643 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3644 && GET_CODE (trueop0) == VEC_CONCAT
3645 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3646 && GET_MODE (XEXP (trueop0, 0)) == mode
3647 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3648 && GET_MODE (XEXP (trueop0, 1)) == mode)
3650 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3651 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3652 rtx subop0, subop1;
3654 gcc_assert (i0 < 4 && i1 < 4);
3655 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3656 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3658 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3661 if (XVECLEN (trueop1, 0) == 2
3662 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3663 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3664 && GET_CODE (trueop0) == VEC_CONCAT
3665 && GET_MODE (trueop0) == mode)
3667 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3668 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3669 rtx subop0, subop1;
3671 gcc_assert (i0 < 2 && i1 < 2);
3672 subop0 = XEXP (trueop0, i0);
3673 subop1 = XEXP (trueop0, i1);
3675 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3678 /* If we select one half of a vec_concat, return that. */
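/* For example, with X and Y both V2SFmode:
     (vec_select:V2SF (vec_concat:V4SF x y) (parallel [0 1])) --> x
     (vec_select:V2SF (vec_concat:V4SF x y) (parallel [2 3])) --> y  */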
3679 if (GET_CODE (trueop0) == VEC_CONCAT
3680 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3682 rtx subop0 = XEXP (trueop0, 0);
3683 rtx subop1 = XEXP (trueop0, 1);
3684 machine_mode mode0 = GET_MODE (subop0);
3685 machine_mode mode1 = GET_MODE (subop1);
3686 int li = GET_MODE_UNIT_SIZE (mode0);
3687 int l0 = GET_MODE_SIZE (mode0) / li;
3688 int l1 = GET_MODE_SIZE (mode1) / li;
3689 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3690 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3692 bool success = true;
3693 for (int i = 1; i < l0; ++i)
3695 rtx j = XVECEXP (trueop1, 0, i);
3696 if (!CONST_INT_P (j) || INTVAL (j) != i)
3698 success = false;
3699 break;
3702 if (success)
3703 return subop0;
3705 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3707 bool success = true;
3708 for (int i = 1; i < l1; ++i)
3710 rtx j = XVECEXP (trueop1, 0, i);
3711 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3713 success = false;
3714 break;
3717 if (success)
3718 return subop1;
3723 if (XVECLEN (trueop1, 0) == 1
3724 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3725 && GET_CODE (trueop0) == VEC_CONCAT)
3727 rtx vec = trueop0;
3728 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3730 /* Try to find the element in the VEC_CONCAT. */
3731 while (GET_MODE (vec) != mode
3732 && GET_CODE (vec) == VEC_CONCAT)
3734 HOST_WIDE_INT vec_size;
3736 if (CONST_INT_P (XEXP (vec, 0)))
3738 /* vec_concat of two const_ints doesn't make sense with
3739 respect to modes. */
3740 if (CONST_INT_P (XEXP (vec, 1)))
3741 return 0;
3743 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3744 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3746 else
3747 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3749 if (offset < vec_size)
3750 vec = XEXP (vec, 0);
3751 else
3753 offset -= vec_size;
3754 vec = XEXP (vec, 1);
3756 vec = avoid_constant_pool_reference (vec);
3759 if (GET_MODE (vec) == mode)
3760 return vec;
3763 /* If we select elements in a vec_merge that all come from the same
3764 operand, select from that operand directly. */
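/* For example, with a V4SImode vec_merge whose mask is 3 (elements 0 and 1
   taken from X, elements 2 and 3 from Y):
     (vec_select:V2SI (vec_merge:V4SI x y (const_int 3)) (parallel [0 1]))
   only ever reads elements of X, so it becomes
     (vec_select:V2SI x (parallel [0 1])). */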
3765 if (GET_CODE (op0) == VEC_MERGE)
3767 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3768 if (CONST_INT_P (trueop02))
3770 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3771 bool all_operand0 = true;
3772 bool all_operand1 = true;
3773 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3775 rtx j = XVECEXP (trueop1, 0, i);
3776 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3777 all_operand1 = false;
3778 else
3779 all_operand0 = false;
3781 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3782 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3783 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3784 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3788 /* If we have two nested selects that are inverses of each
3789 other, replace them with the source operand. */
3790 if (GET_CODE (trueop0) == VEC_SELECT
3791 && GET_MODE (XEXP (trueop0, 0)) == mode)
3793 rtx op0_subop1 = XEXP (trueop0, 1);
3794 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3795 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3797 /* Apply the outer ordering vector to the inner one. (The inner
3798 ordering vector is expressly permitted to be of a different
3799 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3800 then the two VEC_SELECTs cancel. */
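/* For instance, an inner selector of [1 0] followed by an outer selector
   of [1 0] composes to [0 1], i.e. the identity, so the source vector of
   the inner VEC_SELECT is returned unchanged. */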
3801 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3803 rtx x = XVECEXP (trueop1, 0, i);
3804 if (!CONST_INT_P (x))
3805 return 0;
3806 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3807 if (!CONST_INT_P (y) || i != INTVAL (y))
3808 return 0;
3810 return XEXP (trueop0, 0);
3813 return 0;
3814 case VEC_CONCAT:
3816 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3817 ? GET_MODE (trueop0)
3818 : GET_MODE_INNER (mode));
3819 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3820 ? GET_MODE (trueop1)
3821 : GET_MODE_INNER (mode));
3823 gcc_assert (VECTOR_MODE_P (mode));
3824 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3825 == GET_MODE_SIZE (mode));
3827 if (VECTOR_MODE_P (op0_mode))
3828 gcc_assert (GET_MODE_INNER (mode)
3829 == GET_MODE_INNER (op0_mode));
3830 else
3831 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3833 if (VECTOR_MODE_P (op1_mode))
3834 gcc_assert (GET_MODE_INNER (mode)
3835 == GET_MODE_INNER (op1_mode));
3836 else
3837 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3839 if ((GET_CODE (trueop0) == CONST_VECTOR
3840 || CONST_SCALAR_INT_P (trueop0)
3841 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3842 && (GET_CODE (trueop1) == CONST_VECTOR
3843 || CONST_SCALAR_INT_P (trueop1)
3844 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3846 int elt_size = GET_MODE_UNIT_SIZE (mode);
3847 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3848 rtvec v = rtvec_alloc (n_elts);
3849 unsigned int i;
3850 unsigned in_n_elts = 1;
3852 if (VECTOR_MODE_P (op0_mode))
3853 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3854 for (i = 0; i < n_elts; i++)
3856 if (i < in_n_elts)
3858 if (!VECTOR_MODE_P (op0_mode))
3859 RTVEC_ELT (v, i) = trueop0;
3860 else
3861 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3863 else
3865 if (!VECTOR_MODE_P (op1_mode))
3866 RTVEC_ELT (v, i) = trueop1;
3867 else
3868 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3869 i - in_n_elts);
3873 return gen_rtx_CONST_VECTOR (mode, v);
3876 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3877 Restrict the transformation to avoid generating a VEC_SELECT with a
3878 mode unrelated to its operand. */
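/* For example, if X has mode V4SImode:
     (vec_concat:V4SI (vec_select:V2SI x (parallel [0 1]))
                      (vec_select:V2SI x (parallel [3 2])))
   becomes (vec_select:V4SI x (parallel [0 1 3 2])). */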
3879 if (GET_CODE (trueop0) == VEC_SELECT
3880 && GET_CODE (trueop1) == VEC_SELECT
3881 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3882 && GET_MODE (XEXP (trueop0, 0)) == mode)
3884 rtx par0 = XEXP (trueop0, 1);
3885 rtx par1 = XEXP (trueop1, 1);
3886 int len0 = XVECLEN (par0, 0);
3887 int len1 = XVECLEN (par1, 0);
3888 rtvec vec = rtvec_alloc (len0 + len1);
3889 for (int i = 0; i < len0; i++)
3890 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3891 for (int i = 0; i < len1; i++)
3892 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3893 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3894 gen_rtx_PARALLEL (VOIDmode, vec));
3897 return 0;
3899 default:
3900 gcc_unreachable ();
3903 return 0;
3907 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3908 rtx op0, rtx op1)
3910 if (VECTOR_MODE_P (mode)
3911 && code != VEC_CONCAT
3912 && GET_CODE (op0) == CONST_VECTOR
3913 && GET_CODE (op1) == CONST_VECTOR)
3915 unsigned n_elts = GET_MODE_NUNITS (mode);
3916 machine_mode op0mode = GET_MODE (op0);
3917 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3918 machine_mode op1mode = GET_MODE (op1);
3919 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3920 rtvec v = rtvec_alloc (n_elts);
3921 unsigned int i;
3923 gcc_assert (op0_n_elts == n_elts);
3924 gcc_assert (op1_n_elts == n_elts);
3925 for (i = 0; i < n_elts; i++)
3927 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3928 CONST_VECTOR_ELT (op0, i),
3929 CONST_VECTOR_ELT (op1, i));
3930 if (!x)
3931 return 0;
3932 RTVEC_ELT (v, i) = x;
3935 return gen_rtx_CONST_VECTOR (mode, v);
3938 if (VECTOR_MODE_P (mode)
3939 && code == VEC_CONCAT
3940 && (CONST_SCALAR_INT_P (op0)
3941 || GET_CODE (op0) == CONST_FIXED
3942 || CONST_DOUBLE_AS_FLOAT_P (op0))
3943 && (CONST_SCALAR_INT_P (op1)
3944 || CONST_DOUBLE_AS_FLOAT_P (op1)
3945 || GET_CODE (op1) == CONST_FIXED))
3947 unsigned n_elts = GET_MODE_NUNITS (mode);
3948 rtvec v = rtvec_alloc (n_elts);
3950 gcc_assert (n_elts >= 2);
3951 if (n_elts == 2)
3953 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3954 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3956 RTVEC_ELT (v, 0) = op0;
3957 RTVEC_ELT (v, 1) = op1;
3959 else
3961 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3962 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3963 unsigned i;
3965 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3966 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3967 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3969 for (i = 0; i < op0_n_elts; ++i)
3970 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3971 for (i = 0; i < op1_n_elts; ++i)
3972 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3975 return gen_rtx_CONST_VECTOR (mode, v);
3978 if (SCALAR_FLOAT_MODE_P (mode)
3979 && CONST_DOUBLE_AS_FLOAT_P (op0)
3980 && CONST_DOUBLE_AS_FLOAT_P (op1)
3981 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3983 if (code == AND
3984 || code == IOR
3985 || code == XOR)
3987 long tmp0[4];
3988 long tmp1[4];
3989 REAL_VALUE_TYPE r;
3990 int i;
3992 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3993 GET_MODE (op0));
3994 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3995 GET_MODE (op1));
3996 for (i = 0; i < 4; i++)
3998 switch (code)
4000 case AND:
4001 tmp0[i] &= tmp1[i];
4002 break;
4003 case IOR:
4004 tmp0[i] |= tmp1[i];
4005 break;
4006 case XOR:
4007 tmp0[i] ^= tmp1[i];
4008 break;
4009 default:
4010 gcc_unreachable ();
4013 real_from_target (&r, tmp0, mode);
4014 return const_double_from_real_value (r, mode);
4016 else
4018 REAL_VALUE_TYPE f0, f1, value, result;
4019 const REAL_VALUE_TYPE *opr0, *opr1;
4020 bool inexact;
4022 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4023 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4025 if (HONOR_SNANS (mode)
4026 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4027 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4028 return 0;
4030 real_convert (&f0, mode, opr0);
4031 real_convert (&f1, mode, opr1);
4033 if (code == DIV
4034 && real_equal (&f1, &dconst0)
4035 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4036 return 0;
4038 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4039 && flag_trapping_math
4040 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4042 int s0 = REAL_VALUE_NEGATIVE (f0);
4043 int s1 = REAL_VALUE_NEGATIVE (f1);
4045 switch (code)
4047 case PLUS:
4048 /* Inf + -Inf = NaN plus exception. */
4049 if (s0 != s1)
4050 return 0;
4051 break;
4052 case MINUS:
4053 /* Inf - Inf = NaN plus exception. */
4054 if (s0 == s1)
4055 return 0;
4056 break;
4057 case DIV:
4058 /* Inf / Inf = NaN plus exception. */
4059 return 0;
4060 default:
4061 break;
4065 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4066 && flag_trapping_math
4067 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4068 || (REAL_VALUE_ISINF (f1)
4069 && real_equal (&f0, &dconst0))))
4070 /* Inf * 0 = NaN plus exception. */
4071 return 0;
4073 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4074 &f0, &f1);
4075 real_convert (&result, mode, &value);
4077 /* Don't constant fold this floating point operation if
4078 the result has overflowed and flag_trapping_math is set. */
4080 if (flag_trapping_math
4081 && MODE_HAS_INFINITIES (mode)
4082 && REAL_VALUE_ISINF (result)
4083 && !REAL_VALUE_ISINF (f0)
4084 && !REAL_VALUE_ISINF (f1))
4085 /* Overflow plus exception. */
4086 return 0;
4088 /* Don't constant fold this floating point operation if the
4089 result may depend upon the run-time rounding mode and
4090 flag_rounding_math is set, or if GCC's software emulation
4091 is unable to accurately represent the result. */
4093 if ((flag_rounding_math
4094 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4095 && (inexact || !real_identical (&result, &value)))
4096 return NULL_RTX;
4098 return const_double_from_real_value (result, mode);
4102 /* We can fold some multi-word operations. */
4103 scalar_int_mode int_mode;
4104 if (is_a <scalar_int_mode> (mode, &int_mode)
4105 && CONST_SCALAR_INT_P (op0)
4106 && CONST_SCALAR_INT_P (op1))
4108 wide_int result;
4109 bool overflow;
4110 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4111 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4113 #if TARGET_SUPPORTS_WIDE_INT == 0
4114 /* This assert keeps the simplification from producing a result
4115 that cannot be represented in a CONST_DOUBLE but a lot of
4116 upstream callers expect that this function never fails to
4117 simplify something, and so if you added this to the test
4118 above, the code would die later anyway. If this assert
4119 happens, you just need to make the port support wide int. */
4120 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4121 #endif
4122 switch (code)
4124 case MINUS:
4125 result = wi::sub (pop0, pop1);
4126 break;
4128 case PLUS:
4129 result = wi::add (pop0, pop1);
4130 break;
4132 case MULT:
4133 result = wi::mul (pop0, pop1);
4134 break;
4136 case DIV:
4137 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4138 if (overflow)
4139 return NULL_RTX;
4140 break;
4142 case MOD:
4143 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4144 if (overflow)
4145 return NULL_RTX;
4146 break;
4148 case UDIV:
4149 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4150 if (overflow)
4151 return NULL_RTX;
4152 break;
4154 case UMOD:
4155 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4156 if (overflow)
4157 return NULL_RTX;
4158 break;
4160 case AND:
4161 result = wi::bit_and (pop0, pop1);
4162 break;
4164 case IOR:
4165 result = wi::bit_or (pop0, pop1);
4166 break;
4168 case XOR:
4169 result = wi::bit_xor (pop0, pop1);
4170 break;
4172 case SMIN:
4173 result = wi::smin (pop0, pop1);
4174 break;
4176 case SMAX:
4177 result = wi::smax (pop0, pop1);
4178 break;
4180 case UMIN:
4181 result = wi::umin (pop0, pop1);
4182 break;
4184 case UMAX:
4185 result = wi::umax (pop0, pop1);
4186 break;
4188 case LSHIFTRT:
4189 case ASHIFTRT:
4190 case ASHIFT:
4192 wide_int wop1 = pop1;
4193 if (SHIFT_COUNT_TRUNCATED)
4194 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4195 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4196 return NULL_RTX;
4198 switch (code)
4200 case LSHIFTRT:
4201 result = wi::lrshift (pop0, wop1);
4202 break;
4204 case ASHIFTRT:
4205 result = wi::arshift (pop0, wop1);
4206 break;
4208 case ASHIFT:
4209 result = wi::lshift (pop0, wop1);
4210 break;
4212 default:
4213 gcc_unreachable ();
4215 break;
4217 case ROTATE:
4218 case ROTATERT:
4220 if (wi::neg_p (pop1))
4221 return NULL_RTX;
4223 switch (code)
4225 case ROTATE:
4226 result = wi::lrotate (pop0, pop1);
4227 break;
4229 case ROTATERT:
4230 result = wi::rrotate (pop0, pop1);
4231 break;
4233 default:
4234 gcc_unreachable ();
4236 break;
4238 default:
4239 return NULL_RTX;
4241 return immed_wide_int_const (result, int_mode);
4244 return NULL_RTX;
4249 /* Return a positive integer if X should sort after Y. The value
4250 returned is 1 if and only if X and Y are both regs. */
4252 static int
4253 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4255 int result;
4257 result = (commutative_operand_precedence (y)
4258 - commutative_operand_precedence (x));
4259 if (result)
4260 return result + result;
4262 /* Group together equal REGs to do more simplification. */
4263 if (REG_P (x) && REG_P (y))
4264 return REGNO (x) > REGNO (y);
4266 return 0;
4269 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4270 operands may be another PLUS or MINUS.
4272 Rather than test for specific cases, we do this by a brute-force method
4273 and do all possible simplifications until no more changes occur. Then
4274 we rebuild the operation.
4276 May return NULL_RTX when no changes were made. */
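/* Illustrative example (hypothetical pseudo register A): simplifying
     (minus (plus (reg A) (const_int 6)) (plus (reg A) (const_int 2)))
   expands the operands into the list {A, 6, -A, -2}; the A/-A pair cancels
   and the constants fold, so the whole expression becomes (const_int 4). */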
4278 static rtx
4279 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4280 rtx op1)
4282 struct simplify_plus_minus_op_data
4284 rtx op;
4285 short neg;
4286 } ops[16];
4287 rtx result, tem;
4288 int n_ops = 2;
4289 int changed, n_constants, canonicalized = 0;
4290 int i, j;
4292 memset (ops, 0, sizeof ops);
4294 /* Set up the two operands and then expand them until nothing has been
4295 changed. If we run out of room in our array, give up; this should
4296 almost never happen. */
4298 ops[0].op = op0;
4299 ops[0].neg = 0;
4300 ops[1].op = op1;
4301 ops[1].neg = (code == MINUS);
4305 changed = 0;
4306 n_constants = 0;
4308 for (i = 0; i < n_ops; i++)
4310 rtx this_op = ops[i].op;
4311 int this_neg = ops[i].neg;
4312 enum rtx_code this_code = GET_CODE (this_op);
4314 switch (this_code)
4316 case PLUS:
4317 case MINUS:
4318 if (n_ops == ARRAY_SIZE (ops))
4319 return NULL_RTX;
4321 ops[n_ops].op = XEXP (this_op, 1);
4322 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4323 n_ops++;
4325 ops[i].op = XEXP (this_op, 0);
4326 changed = 1;
4327 /* If this operand was negated then we will potentially
4328 canonicalize the expression. Similarly if we don't
4329 place the operands adjacent we're re-ordering the
4330 expression and thus might be performing a
4331 canonicalization. Ignore register re-ordering.
4332 ??? It might be better to shuffle the ops array here,
4333 but then (plus (plus (A, B), plus (C, D))) wouldn't
4334 be seen as non-canonical. */
4335 if (this_neg
4336 || (i != n_ops - 2
4337 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4338 canonicalized = 1;
4339 break;
4341 case NEG:
4342 ops[i].op = XEXP (this_op, 0);
4343 ops[i].neg = ! this_neg;
4344 changed = 1;
4345 canonicalized = 1;
4346 break;
4348 case CONST:
4349 if (n_ops != ARRAY_SIZE (ops)
4350 && GET_CODE (XEXP (this_op, 0)) == PLUS
4351 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4352 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4354 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4355 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4356 ops[n_ops].neg = this_neg;
4357 n_ops++;
4358 changed = 1;
4359 canonicalized = 1;
4361 break;
4363 case NOT:
4364 /* ~a -> (-a - 1) */
4365 if (n_ops != ARRAY_SIZE (ops))
4367 ops[n_ops].op = CONSTM1_RTX (mode);
4368 ops[n_ops++].neg = this_neg;
4369 ops[i].op = XEXP (this_op, 0);
4370 ops[i].neg = !this_neg;
4371 changed = 1;
4372 canonicalized = 1;
4374 break;
4376 case CONST_INT:
4377 n_constants++;
4378 if (this_neg)
4380 ops[i].op = neg_const_int (mode, this_op);
4381 ops[i].neg = 0;
4382 changed = 1;
4383 canonicalized = 1;
4385 break;
4387 default:
4388 break;
4392 while (changed);
4394 if (n_constants > 1)
4395 canonicalized = 1;
4397 gcc_assert (n_ops >= 2);
4399 /* If we only have two operands, we can avoid the loops. */
4400 if (n_ops == 2)
4402 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4403 rtx lhs, rhs;
4405 /* Get the two operands. Be careful with the order, especially for
4406 the cases where code == MINUS. */
4407 if (ops[0].neg && ops[1].neg)
4409 lhs = gen_rtx_NEG (mode, ops[0].op);
4410 rhs = ops[1].op;
4412 else if (ops[0].neg)
4414 lhs = ops[1].op;
4415 rhs = ops[0].op;
4417 else
4419 lhs = ops[0].op;
4420 rhs = ops[1].op;
4423 return simplify_const_binary_operation (code, mode, lhs, rhs);
4426 /* Now simplify each pair of operands until nothing changes. */
4427 while (1)
4429 /* Insertion sort is good enough for a small array. */
4430 for (i = 1; i < n_ops; i++)
4432 struct simplify_plus_minus_op_data save;
4433 int cmp;
4435 j = i - 1;
4436 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4437 if (cmp <= 0)
4438 continue;
4439 /* Just swapping registers doesn't count as canonicalization. */
4440 if (cmp != 1)
4441 canonicalized = 1;
4443 save = ops[i];
4445 ops[j + 1] = ops[j];
4446 while (j--
4447 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4448 ops[j + 1] = save;
4451 changed = 0;
4452 for (i = n_ops - 1; i > 0; i--)
4453 for (j = i - 1; j >= 0; j--)
4455 rtx lhs = ops[j].op, rhs = ops[i].op;
4456 int lneg = ops[j].neg, rneg = ops[i].neg;
4458 if (lhs != 0 && rhs != 0)
4460 enum rtx_code ncode = PLUS;
4462 if (lneg != rneg)
4464 ncode = MINUS;
4465 if (lneg)
4466 std::swap (lhs, rhs);
4468 else if (swap_commutative_operands_p (lhs, rhs))
4469 std::swap (lhs, rhs);
4471 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4472 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4474 rtx tem_lhs, tem_rhs;
4476 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4477 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4478 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4479 tem_rhs);
4481 if (tem && !CONSTANT_P (tem))
4482 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4484 else
4485 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4487 if (tem)
4489 /* Reject "simplifications" that just wrap the two
4490 arguments in a CONST. Failure to do so can result
4491 in infinite recursion with simplify_binary_operation
4492 when it calls us to simplify CONST operations.
4493 Also, if we find such a simplification, don't try
4494 any more combinations with this rhs: We must have
4495 something like symbol+offset, ie. one of the
4496 trivial CONST expressions we handle later. */
4497 if (GET_CODE (tem) == CONST
4498 && GET_CODE (XEXP (tem, 0)) == ncode
4499 && XEXP (XEXP (tem, 0), 0) == lhs
4500 && XEXP (XEXP (tem, 0), 1) == rhs)
4501 break;
4502 lneg &= rneg;
4503 if (GET_CODE (tem) == NEG)
4504 tem = XEXP (tem, 0), lneg = !lneg;
4505 if (CONST_INT_P (tem) && lneg)
4506 tem = neg_const_int (mode, tem), lneg = 0;
4508 ops[i].op = tem;
4509 ops[i].neg = lneg;
4510 ops[j].op = NULL_RTX;
4511 changed = 1;
4512 canonicalized = 1;
4517 if (!changed)
4518 break;
4520 /* Pack all the operands to the lower-numbered entries. */
4521 for (i = 0, j = 0; j < n_ops; j++)
4522 if (ops[j].op)
4524 ops[i] = ops[j];
4525 i++;
4527 n_ops = i;
4530 /* If nothing changed, check that rematerialization of rtl instructions
4531 is still required. */
4532 if (!canonicalized)
4534 /* Perform rematerialization only if all operands are registers and
4535 all operations are PLUS. */
4536 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4537 around rs6000 and how it uses the CA register. See PR67145. */
4538 for (i = 0; i < n_ops; i++)
4539 if (ops[i].neg
4540 || !REG_P (ops[i].op)
4541 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4542 && fixed_regs[REGNO (ops[i].op)]
4543 && !global_regs[REGNO (ops[i].op)]
4544 && ops[i].op != frame_pointer_rtx
4545 && ops[i].op != arg_pointer_rtx
4546 && ops[i].op != stack_pointer_rtx))
4547 return NULL_RTX;
4548 goto gen_result;
4551 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4552 if (n_ops == 2
4553 && CONST_INT_P (ops[1].op)
4554 && CONSTANT_P (ops[0].op)
4555 && ops[0].neg)
4556 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4558 /* We suppressed creation of trivial CONST expressions in the
4559 combination loop to avoid recursion. Create one manually now.
4560 The combination loop should have ensured that there is exactly
4561 one CONST_INT, and the sort will have ensured that it is last
4562 in the array and that any other constant will be next-to-last. */
4564 if (n_ops > 1
4565 && CONST_INT_P (ops[n_ops - 1].op)
4566 && CONSTANT_P (ops[n_ops - 2].op))
4568 rtx value = ops[n_ops - 1].op;
4569 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4570 value = neg_const_int (mode, value);
4571 if (CONST_INT_P (value))
4573 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4574 INTVAL (value));
4575 n_ops--;
4579 /* Put a non-negated operand first, if possible. */
4581 for (i = 0; i < n_ops && ops[i].neg; i++)
4582 continue;
4583 if (i == n_ops)
4584 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4585 else if (i != 0)
4587 tem = ops[0].op;
4588 ops[0] = ops[i];
4589 ops[i].op = tem;
4590 ops[i].neg = 1;
4593 /* Now make the result by performing the requested operations. */
4594 gen_result:
4595 result = ops[0].op;
4596 for (i = 1; i < n_ops; i++)
4597 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4598 mode, result, ops[i].op);
4600 return result;
4603 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4604 static bool
4605 plus_minus_operand_p (const_rtx x)
4607 return GET_CODE (x) == PLUS
4608 || GET_CODE (x) == MINUS
4609 || (GET_CODE (x) == CONST
4610 && GET_CODE (XEXP (x, 0)) == PLUS
4611 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4612 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4615 /* Like simplify_binary_operation except used for relational operators.
4616 MODE is the mode of the result. If MODE is VOIDmode, the two operands
4617 must not both be VOIDmode.
4619 CMP_MODE specifies the mode in which the comparison is done, so it is
4620 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4621 the operands or, if both are VOIDmode, the operands are compared in
4622 "infinite precision". */
4624 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4625 machine_mode cmp_mode, rtx op0, rtx op1)
4627 rtx tem, trueop0, trueop1;
4629 if (cmp_mode == VOIDmode)
4630 cmp_mode = GET_MODE (op0);
4631 if (cmp_mode == VOIDmode)
4632 cmp_mode = GET_MODE (op1);
4634 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4635 if (tem)
4637 if (SCALAR_FLOAT_MODE_P (mode))
4639 if (tem == const0_rtx)
4640 return CONST0_RTX (mode);
4641 #ifdef FLOAT_STORE_FLAG_VALUE
4643 REAL_VALUE_TYPE val;
4644 val = FLOAT_STORE_FLAG_VALUE (mode);
4645 return const_double_from_real_value (val, mode);
4647 #else
4648 return NULL_RTX;
4649 #endif
4651 if (VECTOR_MODE_P (mode))
4653 if (tem == const0_rtx)
4654 return CONST0_RTX (mode);
4655 #ifdef VECTOR_STORE_FLAG_VALUE
4657 int i, units;
4658 rtvec v;
4660 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4661 if (val == NULL_RTX)
4662 return NULL_RTX;
4663 if (val == const1_rtx)
4664 return CONST1_RTX (mode);
4666 units = GET_MODE_NUNITS (mode);
4667 v = rtvec_alloc (units);
4668 for (i = 0; i < units; i++)
4669 RTVEC_ELT (v, i) = val;
4670 return gen_rtx_raw_CONST_VECTOR (mode, v);
4672 #else
4673 return NULL_RTX;
4674 #endif
4677 return tem;
4680 /* For the following tests, ensure const0_rtx is op1. */
4681 if (swap_commutative_operands_p (op0, op1)
4682 || (op0 == const0_rtx && op1 != const0_rtx))
4683 std::swap (op0, op1), code = swap_condition (code);
4685 /* If op0 is a compare, extract the comparison arguments from it. */
4686 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4687 return simplify_gen_relational (code, mode, VOIDmode,
4688 XEXP (op0, 0), XEXP (op0, 1));
4690 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4691 || CC0_P (op0))
4692 return NULL_RTX;
4694 trueop0 = avoid_constant_pool_reference (op0);
4695 trueop1 = avoid_constant_pool_reference (op1);
4696 return simplify_relational_operation_1 (code, mode, cmp_mode,
4697 trueop0, trueop1);
4700 /* This part of simplify_relational_operation is only used when CMP_MODE
4701 is not in class MODE_CC (i.e. it is a real comparison).
4703 MODE is the mode of the result, while CMP_MODE specifies the mode
4704 in which the comparison is done, so it is the mode of the operands. */
4706 static rtx
4707 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4708 machine_mode cmp_mode, rtx op0, rtx op1)
4710 enum rtx_code op0code = GET_CODE (op0);
4712 if (op1 == const0_rtx && COMPARISON_P (op0))
4714 /* If op0 is a comparison, extract the comparison arguments
4715 from it. */
4716 if (code == NE)
4718 if (GET_MODE (op0) == mode)
4719 return simplify_rtx (op0);
4720 else
4721 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4722 XEXP (op0, 0), XEXP (op0, 1));
4724 else if (code == EQ)
4726 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4727 if (new_code != UNKNOWN)
4728 return simplify_gen_relational (new_code, mode, VOIDmode,
4729 XEXP (op0, 0), XEXP (op0, 1));
4733 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4734 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
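/* Concrete instance, assuming SImode: (ltu (plus x 5) 5) holds exactly when
   the addition wrapped around, i.e. when x is at least 0xfffffffb, so it is
   rewritten as (geu x (const_int -5)). */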
4735 if ((code == LTU || code == GEU)
4736 && GET_CODE (op0) == PLUS
4737 && CONST_INT_P (XEXP (op0, 1))
4738 && (rtx_equal_p (op1, XEXP (op0, 0))
4739 || rtx_equal_p (op1, XEXP (op0, 1)))
4740 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4741 && XEXP (op0, 1) != const0_rtx)
4743 rtx new_cmp
4744 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4745 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4746 cmp_mode, XEXP (op0, 0), new_cmp);
4749 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4750 transformed into (LTU a -C). */
4751 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4752 && CONST_INT_P (XEXP (op0, 1))
4753 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4754 && XEXP (op0, 1) != const0_rtx)
4756 rtx new_cmp
4757 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4758 return simplify_gen_relational (LTU, mode, cmp_mode,
4759 XEXP (op0, 0), new_cmp);
4762 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4763 if ((code == LTU || code == GEU)
4764 && GET_CODE (op0) == PLUS
4765 && rtx_equal_p (op1, XEXP (op0, 1))
4766 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4767 && !rtx_equal_p (op1, XEXP (op0, 0)))
4768 return simplify_gen_relational (code, mode, cmp_mode, op0,
4769 copy_rtx (XEXP (op0, 0)));
4771 if (op1 == const0_rtx)
4773 /* Canonicalize (GTU x 0) as (NE x 0). */
4774 if (code == GTU)
4775 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4776 /* Canonicalize (LEU x 0) as (EQ x 0). */
4777 if (code == LEU)
4778 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4780 else if (op1 == const1_rtx)
4782 switch (code)
4784 case GE:
4785 /* Canonicalize (GE x 1) as (GT x 0). */
4786 return simplify_gen_relational (GT, mode, cmp_mode,
4787 op0, const0_rtx);
4788 case GEU:
4789 /* Canonicalize (GEU x 1) as (NE x 0). */
4790 return simplify_gen_relational (NE, mode, cmp_mode,
4791 op0, const0_rtx);
4792 case LT:
4793 /* Canonicalize (LT x 1) as (LE x 0). */
4794 return simplify_gen_relational (LE, mode, cmp_mode,
4795 op0, const0_rtx);
4796 case LTU:
4797 /* Canonicalize (LTU x 1) as (EQ x 0). */
4798 return simplify_gen_relational (EQ, mode, cmp_mode,
4799 op0, const0_rtx);
4800 default:
4801 break;
4804 else if (op1 == constm1_rtx)
4806 /* Canonicalize (LE x -1) as (LT x 0). */
4807 if (code == LE)
4808 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4809 /* Canonicalize (GT x -1) as (GE x 0). */
4810 if (code == GT)
4811 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4814 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4815 if ((code == EQ || code == NE)
4816 && (op0code == PLUS || op0code == MINUS)
4817 && CONSTANT_P (op1)
4818 && CONSTANT_P (XEXP (op0, 1))
4819 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4821 rtx x = XEXP (op0, 0);
4822 rtx c = XEXP (op0, 1);
4823 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4824 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4826 /* Detect an infinite recursive condition, where we oscillate at this
4827 simplification case between:
4828 A + B == C <---> C - B == A,
4829 where A, B, and C are all constants with non-simplifiable expressions,
4830 usually SYMBOL_REFs. */
4831 if (GET_CODE (tem) == invcode
4832 && CONSTANT_P (x)
4833 && rtx_equal_p (c, XEXP (tem, 1)))
4834 return NULL_RTX;
4836 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4839 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4840 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4841 scalar_int_mode int_mode, int_cmp_mode;
4842 if (code == NE
4843 && op1 == const0_rtx
4844 && is_int_mode (mode, &int_mode)
4845 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4846 /* ??? Work-around BImode bugs in the ia64 backend. */
4847 && int_mode != BImode
4848 && int_cmp_mode != BImode
4849 && nonzero_bits (op0, int_cmp_mode) == 1
4850 && STORE_FLAG_VALUE == 1)
4851 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4852 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4853 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4855 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4856 if ((code == EQ || code == NE)
4857 && op1 == const0_rtx
4858 && op0code == XOR)
4859 return simplify_gen_relational (code, mode, cmp_mode,
4860 XEXP (op0, 0), XEXP (op0, 1));
4862 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4863 if ((code == EQ || code == NE)
4864 && op0code == XOR
4865 && rtx_equal_p (XEXP (op0, 0), op1)
4866 && !side_effects_p (XEXP (op0, 0)))
4867 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4868 CONST0_RTX (mode));
4870 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4871 if ((code == EQ || code == NE)
4872 && op0code == XOR
4873 && rtx_equal_p (XEXP (op0, 1), op1)
4874 && !side_effects_p (XEXP (op0, 1)))
4875 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4876 CONST0_RTX (mode));
4878 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4879 if ((code == EQ || code == NE)
4880 && op0code == XOR
4881 && CONST_SCALAR_INT_P (op1)
4882 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4883 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4884 simplify_gen_binary (XOR, cmp_mode,
4885 XEXP (op0, 1), op1));
4887 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4888 can be implemented with a BICS instruction on some targets, or
4889 constant-folded if y is a constant. */
4890 if ((code == EQ || code == NE)
4891 && op0code == AND
4892 && rtx_equal_p (XEXP (op0, 0), op1)
4893 && !side_effects_p (op1)
4894 && op1 != CONST0_RTX (cmp_mode))
4896 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4897 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4899 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4900 CONST0_RTX (cmp_mode));
4903 /* Likewise for (eq/ne (and x y) y). */
4904 if ((code == EQ || code == NE)
4905 && op0code == AND
4906 && rtx_equal_p (XEXP (op0, 1), op1)
4907 && !side_effects_p (op1)
4908 && op1 != CONST0_RTX (cmp_mode))
4910 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4911 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4913 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4914 CONST0_RTX (cmp_mode));
4917 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
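/* For example, in SImode:
     (eq (bswap:SI x) (const_int 0x12345678))
   becomes (eq x (const_int 0x78563412)). */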
4918 if ((code == EQ || code == NE)
4919 && GET_CODE (op0) == BSWAP
4920 && CONST_SCALAR_INT_P (op1))
4921 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4922 simplify_gen_unary (BSWAP, cmp_mode,
4923 op1, cmp_mode));
4925 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4926 if ((code == EQ || code == NE)
4927 && GET_CODE (op0) == BSWAP
4928 && GET_CODE (op1) == BSWAP)
4929 return simplify_gen_relational (code, mode, cmp_mode,
4930 XEXP (op0, 0), XEXP (op1, 0));
4932 if (op0code == POPCOUNT && op1 == const0_rtx)
4933 switch (code)
4935 case EQ:
4936 case LE:
4937 case LEU:
4938 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4939 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4940 XEXP (op0, 0), const0_rtx);
4942 case NE:
4943 case GT:
4944 case GTU:
4945 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4946 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4947 XEXP (op0, 0), const0_rtx);
4949 default:
4950 break;
4953 return NULL_RTX;
4956 enum
4958 CMP_EQ = 1,
4959 CMP_LT = 2,
4960 CMP_GT = 4,
4961 CMP_LTU = 8,
4962 CMP_GTU = 16
4966 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4967 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4968 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4969 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4970 For floating-point comparisons, assume that the operands were ordered. */
4972 static rtx
4973 comparison_result (enum rtx_code code, int known_results)
4975 switch (code)
4977 case EQ:
4978 case UNEQ:
4979 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4980 case NE:
4981 case LTGT:
4982 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4984 case LT:
4985 case UNLT:
4986 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4987 case GE:
4988 case UNGE:
4989 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4991 case GT:
4992 case UNGT:
4993 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4994 case LE:
4995 case UNLE:
4996 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4998 case LTU:
4999 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5000 case GEU:
5001 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5003 case GTU:
5004 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5005 case LEU:
5006 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5008 case ORDERED:
5009 return const_true_rtx;
5010 case UNORDERED:
5011 return const0_rtx;
5012 default:
5013 gcc_unreachable ();
5017 /* Check if the given comparison (done in the given MODE) is actually
5018 a tautology or a contradiction. If the mode is VOIDmode, the
5019 comparison is done in "infinite precision". If no simplification
5020 is possible, this function returns zero. Otherwise, it returns
5021 either const_true_rtx or const0_rtx. */
5024 simplify_const_relational_operation (enum rtx_code code,
5025 machine_mode mode,
5026 rtx op0, rtx op1)
5028 rtx tem;
5029 rtx trueop0;
5030 rtx trueop1;
5032 gcc_assert (mode != VOIDmode
5033 || (GET_MODE (op0) == VOIDmode
5034 && GET_MODE (op1) == VOIDmode));
5036 /* If op0 is a compare, extract the comparison arguments from it. */
5037 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5039 op1 = XEXP (op0, 1);
5040 op0 = XEXP (op0, 0);
5042 if (GET_MODE (op0) != VOIDmode)
5043 mode = GET_MODE (op0);
5044 else if (GET_MODE (op1) != VOIDmode)
5045 mode = GET_MODE (op1);
5046 else
5047 return 0;
5050 /* We can't simplify MODE_CC values since we don't know what the
5051 actual comparison is. */
5052 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5053 return 0;
5055 /* Make sure the constant is second. */
5056 if (swap_commutative_operands_p (op0, op1))
5058 std::swap (op0, op1);
5059 code = swap_condition (code);
5062 trueop0 = avoid_constant_pool_reference (op0);
5063 trueop1 = avoid_constant_pool_reference (op1);
5065 /* For integer comparisons of A and B maybe we can simplify A - B and can
5066 then simplify a comparison of that with zero. If A and B are both either
5067 a register or a CONST_INT, this can't help; testing for these cases will
5068 prevent infinite recursion here and speed things up.
5070 We can only do this for EQ and NE comparisons as otherwise we may
5071 lose or introduce overflow which we cannot disregard as undefined as
5072 we do not know the signedness of the operation on either the left or
5073 the right hand side of the comparison. */
5075 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5076 && (code == EQ || code == NE)
5077 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5078 && (REG_P (op1) || CONST_INT_P (trueop1)))
5079 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5080 /* We cannot do this if tem is a nonzero address. */
5081 && ! nonzero_address_p (tem))
5082 return simplify_const_relational_operation (signed_condition (code),
5083 mode, tem, const0_rtx);
5085 if (! HONOR_NANS (mode) && code == ORDERED)
5086 return const_true_rtx;
5088 if (! HONOR_NANS (mode) && code == UNORDERED)
5089 return const0_rtx;
5091 /* For modes without NaNs, if the two operands are equal, we know the
5092 result except if they have side-effects. Even with NaNs we know
5093 the result of unordered comparisons and, if signaling NaNs are
5094 irrelevant, also the result of LT/GT/LTGT. */
5095 if ((! HONOR_NANS (trueop0)
5096 || code == UNEQ || code == UNLE || code == UNGE
5097 || ((code == LT || code == GT || code == LTGT)
5098 && ! HONOR_SNANS (trueop0)))
5099 && rtx_equal_p (trueop0, trueop1)
5100 && ! side_effects_p (trueop0))
5101 return comparison_result (code, CMP_EQ);
5103 /* If the operands are floating-point constants, see if we can fold
5104 the result. */
5105 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5106 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5107 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5109 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5110 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5112 /* Comparisons are unordered iff at least one of the values is NaN. */
5113 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5114 switch (code)
5116 case UNEQ:
5117 case UNLT:
5118 case UNGT:
5119 case UNLE:
5120 case UNGE:
5121 case NE:
5122 case UNORDERED:
5123 return const_true_rtx;
5124 case EQ:
5125 case LT:
5126 case GT:
5127 case LE:
5128 case GE:
5129 case LTGT:
5130 case ORDERED:
5131 return const0_rtx;
5132 default:
5133 return 0;
5136 return comparison_result (code,
5137 (real_equal (d0, d1) ? CMP_EQ :
5138 real_less (d0, d1) ? CMP_LT : CMP_GT));
5141 /* Otherwise, see if the operands are both integers. */
5142 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5143 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5145 /* It would be nice if we really had a mode here. However, the
5146 largest int representable on the target is as good as
5147 infinite. */
5148 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5149 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5150 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5152 if (wi::eq_p (ptrueop0, ptrueop1))
5153 return comparison_result (code, CMP_EQ);
5154 else
5156 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5157 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5158 return comparison_result (code, cr);
5162 /* Optimize comparisons with upper and lower bounds. */
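/* For example, if nonzero_bits shows that an SImode X fits in 8 bits, its
   range is taken to be [0, 255], so (gtu x 255) folds to const0_rtx and
   (leu x 255) folds to const_true_rtx. */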
5163 scalar_int_mode int_mode;
5164 if (CONST_INT_P (trueop1)
5165 && is_a <scalar_int_mode> (mode, &int_mode)
5166 && HWI_COMPUTABLE_MODE_P (int_mode)
5167 && !side_effects_p (trueop0))
5169 int sign;
5170 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5171 HOST_WIDE_INT val = INTVAL (trueop1);
5172 HOST_WIDE_INT mmin, mmax;
5174 if (code == GEU
5175 || code == LEU
5176 || code == GTU
5177 || code == LTU)
5178 sign = 0;
5179 else
5180 sign = 1;
5182 /* Get a reduced range if the sign bit is zero. */
5183 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5185 mmin = 0;
5186 mmax = nonzero;
5188 else
5190 rtx mmin_rtx, mmax_rtx;
5191 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5193 mmin = INTVAL (mmin_rtx);
5194 mmax = INTVAL (mmax_rtx);
5195 if (sign)
5197 unsigned int sign_copies
5198 = num_sign_bit_copies (trueop0, int_mode);
5200 mmin >>= (sign_copies - 1);
5201 mmax >>= (sign_copies - 1);
5205 switch (code)
5207 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5208 case GEU:
5209 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5210 return const_true_rtx;
5211 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5212 return const0_rtx;
5213 break;
5214 case GE:
5215 if (val <= mmin)
5216 return const_true_rtx;
5217 if (val > mmax)
5218 return const0_rtx;
5219 break;
5221 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5222 case LEU:
5223 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5224 return const_true_rtx;
5225 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5226 return const0_rtx;
5227 break;
5228 case LE:
5229 if (val >= mmax)
5230 return const_true_rtx;
5231 if (val < mmin)
5232 return const0_rtx;
5233 break;
5235 case EQ:
5236 /* x == y is always false for y out of range. */
5237 if (val < mmin || val > mmax)
5238 return const0_rtx;
5239 break;
5241 /* x > y is always false for y >= mmax, always true for y < mmin. */
5242 case GTU:
5243 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5244 return const0_rtx;
5245 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5246 return const_true_rtx;
5247 break;
5248 case GT:
5249 if (val >= mmax)
5250 return const0_rtx;
5251 if (val < mmin)
5252 return const_true_rtx;
5253 break;
5255 /* x < y is always false for y <= mmin, always true for y > mmax. */
5256 case LTU:
5257 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5258 return const0_rtx;
5259 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5260 return const_true_rtx;
5261 break;
5262 case LT:
5263 if (val <= mmin)
5264 return const0_rtx;
5265 if (val > mmax)
5266 return const_true_rtx;
5267 break;
5269 case NE:
5270 /* x != y is always true for y out of range. */
5271 if (val < mmin || val > mmax)
5272 return const_true_rtx;
5273 break;
5275 default:
5276 break;
5280 /* Optimize integer comparisons with zero. */
5281 if (is_a <scalar_int_mode> (mode, &int_mode)
5282 && trueop1 == const0_rtx
5283 && !side_effects_p (trueop0))
5285 /* Some addresses are known to be nonzero. We don't know
5286 their sign, but equality comparisons are known. */
5287 if (nonzero_address_p (trueop0))
5289 if (code == EQ || code == LEU)
5290 return const0_rtx;
5291 if (code == NE || code == GTU)
5292 return const_true_rtx;
5295 /* See if the first operand is an IOR with a constant. If so, we
5296 may be able to determine the result of this comparison. */
5297 if (GET_CODE (op0) == IOR)
5299 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5300 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5302 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5303 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5304 && (UINTVAL (inner_const)
5305 & (HOST_WIDE_INT_1U
5306 << sign_bitnum)));
5308 switch (code)
5310 case EQ:
5311 case LEU:
5312 return const0_rtx;
5313 case NE:
5314 case GTU:
5315 return const_true_rtx;
5316 case LT:
5317 case LE:
5318 if (has_sign)
5319 return const_true_rtx;
5320 break;
5321 case GT:
5322 case GE:
5323 if (has_sign)
5324 return const0_rtx;
5325 break;
5326 default:
5327 break;
5333 /* Optimize comparison of ABS with zero. */
5334 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5335 && (GET_CODE (trueop0) == ABS
5336 || (GET_CODE (trueop0) == FLOAT_EXTEND
5337 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5339 switch (code)
5341 case LT:
5342 /* Optimize abs(x) < 0.0. */
5343 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5344 return const0_rtx;
5345 break;
5347 case GE:
5348 /* Optimize abs(x) >= 0.0. */
5349 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5350 return const_true_rtx;
5351 break;
5353 case UNGE:
5354 /* Optimize ! (abs(x) < 0.0). */
5355 return const_true_rtx;
5357 default:
5358 break;
5362 return 0;
5365 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5366 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5367 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5368 expression can be simplified to that, or NULL_RTX if not.
5369 Assume X is compared against zero with CMP_CODE and the true
5370 arm is TRUE_VAL and the false arm is FALSE_VAL. */
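/* For instance, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, the RTL for "x == 0 ? 32 : clz (x)" can be reduced to clz (x),
   since clz is already defined to produce 32 when X is zero. */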
5372 static rtx
5373 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5375 if (cmp_code != EQ && cmp_code != NE)
5376 return NULL_RTX;
5378 /* Result on X == 0 and X !=0 respectively. */
5379 rtx on_zero, on_nonzero;
5380 if (cmp_code == EQ)
5382 on_zero = true_val;
5383 on_nonzero = false_val;
5385 else
5387 on_zero = false_val;
5388 on_nonzero = true_val;
5391 rtx_code op_code = GET_CODE (on_nonzero);
5392 if ((op_code != CLZ && op_code != CTZ)
5393 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5394 || !CONST_INT_P (on_zero))
5395 return NULL_RTX;
5397 HOST_WIDE_INT op_val;
5398 if (((op_code == CLZ
5399 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5400 || (op_code == CTZ
5401 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5402 && op_val == INTVAL (on_zero))
5403 return on_nonzero;
5405 return NULL_RTX;
5409 /* Simplify CODE, an operation with result mode MODE and three operands,
5410 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5411 a constant. Return 0 if no simplification is possible. */
5414 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5415 machine_mode op0_mode, rtx op0, rtx op1,
5416 rtx op2)
5418 bool any_change = false;
5419 rtx tem, trueop2;
5420 scalar_int_mode int_mode, int_op0_mode;
5422 switch (code)
5424 case FMA:
5425 /* Simplify negations around the multiplication. */
5426 /* -a * -b + c => a * b + c. */
5427 if (GET_CODE (op0) == NEG)
5429 tem = simplify_unary_operation (NEG, mode, op1, mode);
5430 if (tem)
5431 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5433 else if (GET_CODE (op1) == NEG)
5435 tem = simplify_unary_operation (NEG, mode, op0, mode);
5436 if (tem)
5437 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5440 /* Canonicalize the two multiplication operands. */
5441 /* a * -b + c => -b * a + c. */
5442 if (swap_commutative_operands_p (op0, op1))
5443 std::swap (op0, op1), any_change = true;
5445 if (any_change)
5446 return gen_rtx_FMA (mode, op0, op1, op2);
5447 return NULL_RTX;
5449 case SIGN_EXTRACT:
5450 case ZERO_EXTRACT:
5451 if (CONST_INT_P (op0)
5452 && CONST_INT_P (op1)
5453 && CONST_INT_P (op2)
5454 && is_a <scalar_int_mode> (mode, &int_mode)
5455 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5456 && HWI_COMPUTABLE_MODE_P (int_mode))
5458 /* Extracting a bit-field from a constant.  */
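/* Worked example, assuming !BITS_BIG_ENDIAN: extracting 4 bits at bit
   position 4 from (const_int 0xab) isolates the nibble 0xa, so the result
   is (const_int 10) for ZERO_EXTRACT and (const_int -6) for SIGN_EXTRACT. */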
5459 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5460 HOST_WIDE_INT op1val = INTVAL (op1);
5461 HOST_WIDE_INT op2val = INTVAL (op2);
5462 if (!BITS_BIG_ENDIAN)
5463 val >>= op2val;
5464 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5465 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5466 else
5467 /* Not enough information to calculate the bit position. */
5468 break;
5470 if (HOST_BITS_PER_WIDE_INT != op1val)
5472 /* First zero-extend. */
5473 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5474 /* If desired, propagate sign bit. */
5475 if (code == SIGN_EXTRACT
5476 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5477 != 0)
5478 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5481 return gen_int_mode (val, int_mode);
5483 break;
5485 case IF_THEN_ELSE:
5486 if (CONST_INT_P (op0))
5487 return op0 != const0_rtx ? op1 : op2;
5489 /* Convert c ? a : a into "a". */
5490 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5491 return op1;
5493 /* Convert a != b ? a : b into "a". */
5494 if (GET_CODE (op0) == NE
5495 && ! side_effects_p (op0)
5496 && ! HONOR_NANS (mode)
5497 && ! HONOR_SIGNED_ZEROS (mode)
5498 && ((rtx_equal_p (XEXP (op0, 0), op1)
5499 && rtx_equal_p (XEXP (op0, 1), op2))
5500 || (rtx_equal_p (XEXP (op0, 0), op2)
5501 && rtx_equal_p (XEXP (op0, 1), op1))))
5502 return op1;
5504 /* Convert a == b ? a : b into "b". */
5505 if (GET_CODE (op0) == EQ
5506 && ! side_effects_p (op0)
5507 && ! HONOR_NANS (mode)
5508 && ! HONOR_SIGNED_ZEROS (mode)
5509 && ((rtx_equal_p (XEXP (op0, 0), op1)
5510 && rtx_equal_p (XEXP (op0, 1), op2))
5511 || (rtx_equal_p (XEXP (op0, 0), op2)
5512 && rtx_equal_p (XEXP (op0, 1), op1))))
5513 return op2;
5515 /* Convert (!c) != {0,...,0} ? a : b into
5516 c != {0,...,0} ? b : a for vector modes. */
5517 if (VECTOR_MODE_P (GET_MODE (op1))
5518 && GET_CODE (op0) == NE
5519 && GET_CODE (XEXP (op0, 0)) == NOT
5520 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5522 rtx cv = XEXP (op0, 1);
5523 int nunits = CONST_VECTOR_NUNITS (cv);
5524 bool ok = true;
5525 for (int i = 0; i < nunits; ++i)
5526 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5528 ok = false;
5529 break;
5531 if (ok)
5533 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5534 XEXP (XEXP (op0, 0), 0),
5535 XEXP (op0, 1));
5536 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5537 return retval;
5541 /* Convert x == 0 ? N : clz (x) into clz (x) when
5542 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5543 Similarly for ctz (x). */
5544 if (COMPARISON_P (op0) && !side_effects_p (op0)
5545 && XEXP (op0, 1) == const0_rtx)
5547 rtx simplified
5548 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5549 op1, op2);
5550 if (simplified)
5551 return simplified;
5554 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5556 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5557 ? GET_MODE (XEXP (op0, 1))
5558 : GET_MODE (XEXP (op0, 0)));
5559 rtx temp;
5561 /* Look for happy constants in op1 and op2. */
5562 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5564 HOST_WIDE_INT t = INTVAL (op1);
5565 HOST_WIDE_INT f = INTVAL (op2);
5567 if (t == STORE_FLAG_VALUE && f == 0)
5568 code = GET_CODE (op0);
5569 else if (t == 0 && f == STORE_FLAG_VALUE)
5571 enum rtx_code tmp;
5572 tmp = reversed_comparison_code (op0, NULL);
5573 if (tmp == UNKNOWN)
5574 break;
5575 code = tmp;
5577 else
5578 break;
5580 return simplify_gen_relational (code, mode, cmp_mode,
5581 XEXP (op0, 0), XEXP (op0, 1));
5584 if (cmp_mode == VOIDmode)
5585 cmp_mode = op0_mode;
5586 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5587 cmp_mode, XEXP (op0, 0),
5588 XEXP (op0, 1));
5590 /* See if any simplifications were possible. */
5591 if (temp)
5593 if (CONST_INT_P (temp))
5594 return temp == const0_rtx ? op2 : op1;
5595 else if (temp)
5596 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5599 break;
5601 case VEC_MERGE:
5602 gcc_assert (GET_MODE (op0) == mode);
5603 gcc_assert (GET_MODE (op1) == mode);
5604 gcc_assert (VECTOR_MODE_P (mode));
5605 trueop2 = avoid_constant_pool_reference (op2);
5606 if (CONST_INT_P (trueop2))
5608 int elt_size = GET_MODE_UNIT_SIZE (mode);
5609 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5610 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5611 unsigned HOST_WIDE_INT mask;
5612 if (n_elts == HOST_BITS_PER_WIDE_INT)
5613 mask = -1;
5614 else
5615 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5617 if (!(sel & mask) && !side_effects_p (op0))
5618 return op1;
5619 if ((sel & mask) == mask && !side_effects_p (op1))
5620 return op0;
5622 rtx trueop0 = avoid_constant_pool_reference (op0);
5623 rtx trueop1 = avoid_constant_pool_reference (op1);
5624 if (GET_CODE (trueop0) == CONST_VECTOR
5625 && GET_CODE (trueop1) == CONST_VECTOR)
5627 rtvec v = rtvec_alloc (n_elts);
5628 unsigned int i;
5630 for (i = 0; i < n_elts; i++)
5631 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5632 ? CONST_VECTOR_ELT (trueop0, i)
5633 : CONST_VECTOR_ELT (trueop1, i));
5634 return gen_rtx_CONST_VECTOR (mode, v);
5637 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5638 if no element from a appears in the result. */
5639 if (GET_CODE (op0) == VEC_MERGE)
5641 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5642 if (CONST_INT_P (tem))
5644 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5645 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5646 return simplify_gen_ternary (code, mode, mode,
5647 XEXP (op0, 1), op1, op2);
5648 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5649 return simplify_gen_ternary (code, mode, mode,
5650 XEXP (op0, 0), op1, op2);
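/* Likewise when the second operand is itself a VEC_MERGE: keep only
   the input of the inner merge that can actually reach the result.  */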
5653 if (GET_CODE (op1) == VEC_MERGE)
5655 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5656 if (CONST_INT_P (tem))
5658 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5659 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5660 return simplify_gen_ternary (code, mode, mode,
5661 op0, XEXP (op1, 1), op2);
5662 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5663 return simplify_gen_ternary (code, mode, mode,
5664 op0, XEXP (op1, 0), op2);
5668 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5669 with a. */
5670 if (GET_CODE (op0) == VEC_DUPLICATE
5671 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5672 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5673 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
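/* Here op2 selects only element I, which the VEC_DUPLICATE rebuilds
   from the very vector we merge into (both facts are checked below),
   so the merge leaves op1 unchanged.  */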
5675 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5676 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5678 if (XEXP (XEXP (op0, 0), 0) == op1
5679 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5680 return op1;
5685 if (rtx_equal_p (op0, op1)
5686 && !side_effects_p (op2) && !side_effects_p (op1))
5687 return op0;
5689 break;
5691 default:
5692 gcc_unreachable ();
5695 return 0;
5698 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5699 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5700 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5702 Works by unpacking OP into a collection of 8-bit values
5703 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5704 and then repacking them again for OUTERMODE. */
5706 static rtx
5707 simplify_immed_subreg (machine_mode outermode, rtx op,
5708 machine_mode innermode, unsigned int byte)
5710 enum {
5711 value_bit = 8,
5712 value_mask = (1 << value_bit) - 1
5714 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5715 int value_start;
5716 int i;
5717 int elem;
5719 int num_elem;
5720 rtx * elems;
5721 int elem_bitsize;
5722 rtx result_s = NULL;
5723 rtvec result_v = NULL;
5724 enum mode_class outer_class;
5725 machine_mode outer_submode;
5726 int max_bitsize;
5728 /* Some ports misuse CCmode. */
5729 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5730 return op;
5732 /* We have no way to represent a complex constant at the rtl level. */
5733 if (COMPLEX_MODE_P (outermode))
5734 return NULL_RTX;
5736 /* We support any size mode. */
5737 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5738 GET_MODE_BITSIZE (innermode));
5740 /* Unpack the value. */
5742 if (GET_CODE (op) == CONST_VECTOR)
5744 num_elem = CONST_VECTOR_NUNITS (op);
5745 elems = &CONST_VECTOR_ELT (op, 0);
5746 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5748 else
5750 num_elem = 1;
5751 elems = &op;
5752 elem_bitsize = max_bitsize;
5754 /* If this asserts, it is too complicated; reducing value_bit may help. */
5755 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5756 /* I don't know how to handle endianness of sub-units. */
5757 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5759 for (elem = 0; elem < num_elem; elem++)
5761 unsigned char * vp;
5762 rtx el = elems[elem];
5764 /* Vectors are kept in target memory order. (This is probably
5765 a mistake.) */
5767 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5768 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5769 / BITS_PER_UNIT);
5770 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5771 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5772 unsigned bytele = (subword_byte % UNITS_PER_WORD
5773 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5774 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5777 switch (GET_CODE (el))
5779 case CONST_INT:
5780 for (i = 0;
5781 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5782 i += value_bit)
5783 *vp++ = INTVAL (el) >> i;
5784 /* CONST_INTs are always logically sign-extended. */
5785 for (; i < elem_bitsize; i += value_bit)
5786 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5787 break;
5789 case CONST_WIDE_INT:
5791 rtx_mode_t val = rtx_mode_t (el, innermode);
5792 unsigned char extend = wi::sign_mask (val);
5793 int prec = wi::get_precision (val);
5795 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5796 *vp++ = wi::extract_uhwi (val, i, value_bit);
5797 for (; i < elem_bitsize; i += value_bit)
5798 *vp++ = extend;
5800 break;
5802 case CONST_DOUBLE:
5803 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5805 unsigned char extend = 0;
5806 /* If this triggers, someone should have generated a
5807 CONST_INT instead. */
5808 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5810 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5811 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5812 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5814 *vp++
5815 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5816 i += value_bit;
5819 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5820 extend = -1;
5821 for (; i < elem_bitsize; i += value_bit)
5822 *vp++ = extend;
5824 else
5826 /* This is big enough for anything on the platform. */
5827 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5828 scalar_float_mode el_mode;
5830 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5831 int bitsize = GET_MODE_BITSIZE (el_mode);
5833 gcc_assert (bitsize <= elem_bitsize);
5834 gcc_assert (bitsize % value_bit == 0);
5836 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5837 GET_MODE (el));
5839 /* real_to_target produces its result in words affected by
5840 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5841 and use WORDS_BIG_ENDIAN instead; see the documentation
5842 of SUBREG in rtl.texi. */
5843 for (i = 0; i < bitsize; i += value_bit)
5845 int ibase;
5846 if (WORDS_BIG_ENDIAN)
5847 ibase = bitsize - 1 - i;
5848 else
5849 ibase = i;
5850 *vp++ = tmp[ibase / 32] >> i % 32;
5853 /* It shouldn't matter what's done here, so fill it with
5854 zero. */
5855 for (; i < elem_bitsize; i += value_bit)
5856 *vp++ = 0;
5858 break;
5860 case CONST_FIXED:
5861 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5863 for (i = 0; i < elem_bitsize; i += value_bit)
5864 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5866 else
5868 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5869 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5870 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5871 i += value_bit)
5872 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5873 >> (i - HOST_BITS_PER_WIDE_INT);
5874 for (; i < elem_bitsize; i += value_bit)
5875 *vp++ = 0;
5877 break;
5879 default:
5880 gcc_unreachable ();
5884 /* Now, pick the right byte to start with. */
5885 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5886 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5887 will already have offset 0. */
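/* For instance, taking the SImode lowpart of a DImode value on a
   words- and bytes-big-endian target uses SUBREG_BYTE 4; that memory
   offset is renumbered here to value-byte 0, since the low half
   occupies bytes 0-3 of the little-endian VALUE array.  */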
5888 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5890 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5891 - byte);
5892 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5893 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5894 byte = (subword_byte % UNITS_PER_WORD
5895 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5898 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5899 so if it's become negative it will instead be very large.) */
5900 gcc_assert (byte < GET_MODE_SIZE (innermode));
5902 /* Convert from bytes to chunks of size value_bit. */
5903 value_start = byte * (BITS_PER_UNIT / value_bit);
5905 /* Re-pack the value. */
5906 num_elem = GET_MODE_NUNITS (outermode);
5908 if (VECTOR_MODE_P (outermode))
5910 result_v = rtvec_alloc (num_elem);
5911 elems = &RTVEC_ELT (result_v, 0);
5913 else
5914 elems = &result_s;
5916 outer_submode = GET_MODE_INNER (outermode);
5917 outer_class = GET_MODE_CLASS (outer_submode);
5918 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5920 gcc_assert (elem_bitsize % value_bit == 0);
5921 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5923 for (elem = 0; elem < num_elem; elem++)
5925 unsigned char *vp;
5927 /* Vectors are stored in target memory order. (This is probably
5928 a mistake.) */
5930 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5931 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5932 / BITS_PER_UNIT);
5933 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5934 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5935 unsigned bytele = (subword_byte % UNITS_PER_WORD
5936 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5937 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5940 switch (outer_class)
5942 case MODE_INT:
5943 case MODE_PARTIAL_INT:
5945 int u;
5946 int base = 0;
5947 int units
5948 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5949 / HOST_BITS_PER_WIDE_INT;
5950 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5951 wide_int r;
5953 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5954 return NULL_RTX;
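/* Assemble the unpacked bytes into HOST_WIDE_INT-sized chunks,
   least significant chunk first, and build a wide_int from them.  */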
5955 for (u = 0; u < units; u++)
5957 unsigned HOST_WIDE_INT buf = 0;
5958 for (i = 0;
5959 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5960 i += value_bit)
5961 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5963 tmp[u] = buf;
5964 base += HOST_BITS_PER_WIDE_INT;
5966 r = wide_int::from_array (tmp, units,
5967 GET_MODE_PRECISION (outer_submode));
5968 #if TARGET_SUPPORTS_WIDE_INT == 0
5969 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5970 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5971 return NULL_RTX;
5972 #endif
5973 elems[elem] = immed_wide_int_const (r, outer_submode);
5975 break;
5977 case MODE_FLOAT:
5978 case MODE_DECIMAL_FLOAT:
5980 REAL_VALUE_TYPE r;
5981 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5983 /* real_from_target wants its input in words affected by
5984 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5985 and use WORDS_BIG_ENDIAN instead; see the documentation
5986 of SUBREG in rtl.texi. */
5987 for (i = 0; i < elem_bitsize; i += value_bit)
5989 int ibase;
5990 if (WORDS_BIG_ENDIAN)
5991 ibase = elem_bitsize - 1 - i;
5992 else
5993 ibase = i;
5994 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5997 real_from_target (&r, tmp, outer_submode);
5998 elems[elem] = const_double_from_real_value (r, outer_submode);
6000 break;
6002 case MODE_FRACT:
6003 case MODE_UFRACT:
6004 case MODE_ACCUM:
6005 case MODE_UACCUM:
6007 FIXED_VALUE_TYPE f;
6008 f.data.low = 0;
6009 f.data.high = 0;
6010 f.mode = outer_submode;
6012 for (i = 0;
6013 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6014 i += value_bit)
6015 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6016 for (; i < elem_bitsize; i += value_bit)
6017 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6018 << (i - HOST_BITS_PER_WIDE_INT));
6020 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6022 break;
6024 default:
6025 gcc_unreachable ();
6028 if (VECTOR_MODE_P (outermode))
6029 return gen_rtx_CONST_VECTOR (outermode, result_v);
6030 else
6031 return result_s;
6034 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6035 Return 0 if no simplifications are possible. */
6036 rtx
6037 simplify_subreg (machine_mode outermode, rtx op,
6038 machine_mode innermode, unsigned int byte)
6040 /* Little bit of sanity checking. */
6041 gcc_assert (innermode != VOIDmode);
6042 gcc_assert (outermode != VOIDmode);
6043 gcc_assert (innermode != BLKmode);
6044 gcc_assert (outermode != BLKmode);
6046 gcc_assert (GET_MODE (op) == innermode
6047 || GET_MODE (op) == VOIDmode);
6049 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6050 return NULL_RTX;
6052 if (byte >= GET_MODE_SIZE (innermode))
6053 return NULL_RTX;
6055 if (outermode == innermode && !byte)
6056 return op;
6058 if (CONST_SCALAR_INT_P (op)
6059 || CONST_DOUBLE_AS_FLOAT_P (op)
6060 || GET_CODE (op) == CONST_FIXED
6061 || GET_CODE (op) == CONST_VECTOR)
6062 return simplify_immed_subreg (outermode, op, innermode, byte);
6064 /* Changing mode twice with SUBREG => just change it once,
6065 or not at all if changing back to the starting mode. */
6066 if (GET_CODE (op) == SUBREG)
6068 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6069 int final_offset = byte + SUBREG_BYTE (op);
6070 rtx newx;
6072 if (outermode == innermostmode
6073 && byte == 0 && SUBREG_BYTE (op) == 0)
6074 return SUBREG_REG (op);
6076 /* The SUBREG_BYTE represents the offset, as if the value were stored
6077 in memory. An irritating exception is the paradoxical subreg, where
6078 we define SUBREG_BYTE to be 0; on big-endian machines this value
6079 would really be negative. For the moment, undo this exception. */
6080 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6082 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6083 if (WORDS_BIG_ENDIAN)
6084 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6085 if (BYTES_BIG_ENDIAN)
6086 final_offset += difference % UNITS_PER_WORD;
6088 if (SUBREG_BYTE (op) == 0
6089 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6091 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6092 if (WORDS_BIG_ENDIAN)
6093 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6094 if (BYTES_BIG_ENDIAN)
6095 final_offset += difference % UNITS_PER_WORD;
6098 /* See whether resulting subreg will be paradoxical. */
6099 if (!paradoxical_subreg_p (outermode, innermostmode))
6101 /* In nonparadoxical subregs we can't handle negative offsets. */
6102 if (final_offset < 0)
6103 return NULL_RTX;
6104 /* Bail out if the resulting subreg would be incorrect. */
6105 if (final_offset % GET_MODE_SIZE (outermode)
6106 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6107 return NULL_RTX;
6109 else
6111 int offset = 0;
6112 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6114 /* In a paradoxical subreg, see whether we are still looking at the lower
6115 part. If so, our SUBREG_BYTE will be 0. */
6116 if (WORDS_BIG_ENDIAN)
6117 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6118 if (BYTES_BIG_ENDIAN)
6119 offset += difference % UNITS_PER_WORD;
6120 if (offset == final_offset)
6121 final_offset = 0;
6122 else
6123 return NULL_RTX;
6126 /* Recurse for further possible simplifications. */
6127 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6128 final_offset);
6129 if (newx)
6130 return newx;
6131 if (validate_subreg (outermode, innermostmode,
6132 SUBREG_REG (op), final_offset))
6134 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6135 if (SUBREG_PROMOTED_VAR_P (op)
6136 && SUBREG_PROMOTED_SIGN (op) >= 0
6137 && GET_MODE_CLASS (outermode) == MODE_INT
6138 && IN_RANGE (GET_MODE_SIZE (outermode),
6139 GET_MODE_SIZE (innermode),
6140 GET_MODE_SIZE (innermostmode))
6141 && subreg_lowpart_p (newx))
6143 SUBREG_PROMOTED_VAR_P (newx) = 1;
6144 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6146 return newx;
6148 return NULL_RTX;
6151 /* SUBREG of a hard register => just change the register number
6152 and/or mode. If the hard register is not valid in that mode,
6153 suppress this simplification. If the hard register is the stack,
6154 frame, or argument pointer, leave this as a SUBREG. */
6156 if (REG_P (op) && HARD_REGISTER_P (op))
6158 unsigned int regno, final_regno;
6160 regno = REGNO (op);
6161 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6162 if (HARD_REGISTER_NUM_P (final_regno))
6164 rtx x;
6165 int final_offset = byte;
6167 /* Adjust offset for paradoxical subregs. */
6168 if (byte == 0
6169 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6171 int difference = (GET_MODE_SIZE (innermode)
6172 - GET_MODE_SIZE (outermode));
6173 if (WORDS_BIG_ENDIAN)
6174 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6175 if (BYTES_BIG_ENDIAN)
6176 final_offset += difference % UNITS_PER_WORD;
6179 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6181 /* Propagate the original regno. We don't have any way to specify
6182 the offset inside the original regno, so do so only for the lowpart.
6183 The information is used only by alias analysis, which cannot
6184 grok partial registers anyway. */
6186 if (subreg_lowpart_offset (outermode, innermode) == byte)
6187 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6188 return x;
6192 /* If we have a SUBREG of a register that we are replacing and we are
6193 replacing it with a MEM, make a new MEM and try replacing the
6194 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6195 or if we would be widening it. */
6197 if (MEM_P (op)
6198 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6199 /* Allow splitting of volatile memory references in case we don't
6200 have an instruction to move the whole thing. */
6201 && (! MEM_VOLATILE_P (op)
6202 || ! have_insn_for (SET, innermode))
6203 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6204 return adjust_address_nv (op, outermode, byte);
6206 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6207 of two parts. */
6208 if (GET_CODE (op) == CONCAT
6209 || GET_CODE (op) == VEC_CONCAT)
6211 unsigned int part_size, final_offset;
6212 rtx part, res;
6214 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6215 if (part_mode == VOIDmode)
6216 part_mode = GET_MODE_INNER (GET_MODE (op));
6217 part_size = GET_MODE_SIZE (part_mode);
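/* Decide which half of the CONCAT the requested bytes fall in and
   rebase BYTE to an offset within that half.  */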
6218 if (byte < part_size)
6220 part = XEXP (op, 0);
6221 final_offset = byte;
6223 else
6225 part = XEXP (op, 1);
6226 final_offset = byte - part_size;
6229 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6230 return NULL_RTX;
6232 part_mode = GET_MODE (part);
6233 if (part_mode == VOIDmode)
6234 part_mode = GET_MODE_INNER (GET_MODE (op));
6235 res = simplify_subreg (outermode, part, part_mode, final_offset);
6236 if (res)
6237 return res;
6238 if (validate_subreg (outermode, part_mode, part, final_offset))
6239 return gen_rtx_SUBREG (outermode, part, final_offset);
6240 return NULL_RTX;
6243 /* A SUBREG resulting from a zero extension may fold to zero if
6244 it extracts higher bits than the ZERO_EXTEND's source bits. */
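/* For instance, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only bits produced
   by the extension and therefore folds to (const_int 0).  */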
6245 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6247 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6248 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6249 return CONST0_RTX (outermode);
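/* A lowpart SUBREG to a narrower scalar integer mode acts like a
   truncation, so also try the TRUNCATE simplifications.  */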
6252 scalar_int_mode int_outermode, int_innermode;
6253 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6254 && is_a <scalar_int_mode> (innermode, &int_innermode)
6255 && (GET_MODE_PRECISION (int_outermode)
6256 < GET_MODE_PRECISION (int_innermode))
6257 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6259 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6260 if (tem)
6261 return tem;
6264 return NULL_RTX;
6267 /* Make a SUBREG operation or equivalent if it folds. */
6269 rtx
6270 simplify_gen_subreg (machine_mode outermode, rtx op,
6271 machine_mode innermode, unsigned int byte)
6273 rtx newx;
6275 newx = simplify_subreg (outermode, op, innermode, byte);
6276 if (newx)
6277 return newx;
6279 if (GET_CODE (op) == SUBREG
6280 || GET_CODE (op) == CONCAT
6281 || GET_MODE (op) == VOIDmode)
6282 return NULL_RTX;
6284 if (validate_subreg (outermode, innermode, op, byte))
6285 return gen_rtx_SUBREG (outermode, op, byte);
6287 return NULL_RTX;
6290 /* Generates a subreg to get the least significant part of EXPR (in mode
6291 INNER_MODE) to OUTER_MODE. */
6293 rtx
6294 lowpart_subreg (machine_mode outer_mode, rtx expr,
6295 machine_mode inner_mode)
6297 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6298 subreg_lowpart_offset (outer_mode, inner_mode));
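/* For instance, lowpart_subreg (QImode, x, SImode) for a pseudo X
   yields (subreg:QI (reg:SI x) 0) on a little-endian target (offset 3
   on big-endian), while for a CONST_INT it folds directly to the
   corresponding QImode constant.  */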
6301 /* Simplify X, an rtx expression.
6303 Return the simplified expression or NULL if no simplifications
6304 were possible.
6306 This is the preferred entry point into the simplification routines;
6307 however, we still allow passes to call the more specific routines.
6309 Right now GCC has three (yes, three) major bodies of RTL simplification
6310 code that need to be unified.
6312 1. fold_rtx in cse.c. This code uses various CSE specific
6313 information to aid in RTL simplification.
6315 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6316 it uses combine specific information to aid in RTL
6317 simplification.
6319 3. The routines in this file.
6322 Long term we want to only have one body of simplification code; to
6323 get to that state I recommend the following steps:
6325 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6326 which do not depend on pass-specific state into these routines.
6328 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6329 use this routine whenever possible.
6331 3. Allow for pass dependent state to be provided to these
6332 routines and add simplifications based on the pass dependent
6333 state. Remove code from cse.c & combine.c that becomes
6334 redundant/dead.
6336 It will take time, but ultimately the compiler will be easier to
6337 maintain and improve. It's totally silly that when we add a
6338 simplification it needs to be added to 4 places (3 for RTL
6339 simplification and 1 for tree simplification). */
6341 rtx
6342 simplify_rtx (const_rtx x)
6344 const enum rtx_code code = GET_CODE (x);
6345 const machine_mode mode = GET_MODE (x);
6347 switch (GET_RTX_CLASS (code))
6349 case RTX_UNARY:
6350 return simplify_unary_operation (code, mode,
6351 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6352 case RTX_COMM_ARITH:
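/* For commutative operations, first put the operands into canonical
   order; simplify_gen_binary will simplify the swapped form or
   generate it in canonical order.  Otherwise fall through to the
   ordinary binary case.  */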
6353 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6354 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6356 /* Fall through. */
6358 case RTX_BIN_ARITH:
6359 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6361 case RTX_TERNARY:
6362 case RTX_BITFIELD_OPS:
6363 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6364 XEXP (x, 0), XEXP (x, 1),
6365 XEXP (x, 2));
6367 case RTX_COMPARE:
6368 case RTX_COMM_COMPARE:
6369 return simplify_relational_operation (code, mode,
6370 ((GET_MODE (XEXP (x, 0))
6371 != VOIDmode)
6372 ? GET_MODE (XEXP (x, 0))
6373 : GET_MODE (XEXP (x, 1))),
6374 XEXP (x, 0),
6375 XEXP (x, 1));
6377 case RTX_EXTRA:
6378 if (code == SUBREG)
6379 return simplify_subreg (mode, SUBREG_REG (x),
6380 GET_MODE (SUBREG_REG (x)),
6381 SUBREG_BYTE (x));
6382 break;
6384 case RTX_OBJ:
6385 if (code == LO_SUM)
6387 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6388 if (GET_CODE (XEXP (x, 0)) == HIGH
6389 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6390 return XEXP (x, 1);
6392 break;
6394 default:
6395 break;
6397 return NULL;