gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
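/* A minimal illustration, assuming a 64-bit HOST_WIDE_INT: for a pair
   describing the value 5, low is 5 and HWI_SIGN_EXTEND (5) gives 0 for the
   high half; for -5, low has its top bit set and HWI_SIGN_EXTEND (-5) gives
   HOST_WIDE_INT_M1, so the (low, high) pair reads as a sign-extended -5.  */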
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
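/* A minimal illustration: neg_const_int (SImode, GEN_INT (5)) yields
   (const_int -5) via gen_int_mode.  The call into
   simplify_const_unary_operation above only triggers when MODE is wider
   than HOST_WIDE_INT and the negation wraps back to the same
   HOST_WIDE_INT value, where a single sign-extended HOST_WIDE_INT cannot
   represent the negated result.  */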
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
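/* A minimal illustration, assuming a 32-bit SImode: a constant whose SImode
   value is 0x80000000 (only the sign bit set) satisfies
   mode_signbit_p (SImode, x), while values such as 0x40000000 or
   0x80000001 do not.  */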
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
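/* A minimal illustration, assuming a 32-bit SImode:
   val_signbit_p (SImode, 0x80000000),
   val_signbit_known_set_p (SImode, 0xffffffff) and
   val_signbit_known_clear_p (SImode, 0x7fffffff) are all true; each of the
   three returns false for modes wider than HOST_WIDE_INT.  */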
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
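/* A minimal illustration: simplify_gen_binary (PLUS, SImode, const1_rtx,
   GEN_INT (2)) folds to (const_int 3); when no folding is possible, a
   commutative operation is canonicalized with the constant operand second,
   e.g. (plus:SI (reg:SI x) (const_int 4)).  */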
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
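/* A minimal illustration: if X is (mem/u:SF (symbol_ref C)) where C is a
   constant-pool entry holding the SFmode value 1.0, the corresponding
   CONST_DOUBLE is returned; a read in a different mode or at a nonzero
   offset goes through simplify_subreg, and X itself is returned if that
   fails.  */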
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
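/* A minimal illustration: a MEM whose MEM_EXPR is a file-scope VAR_DECL
   with DECL_RTL (mem:SI (symbol_ref "v")), for a hypothetical variable v,
   and a known MEM_OFFSET is rewritten via adjust_address_nv to address "v"
   directly, provided the mode and offset checks above succeed.  */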
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies the mode in which the comparison is done. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X; if it returns non-NULL, replace X with its return value and simplify the
429 result. */
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
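/* A minimal illustration, with R a pseudo register: replacing R by
   const1_rtx in (plus:SI R (const_int 0)) using
   simplify_replace_rtx (x, R, const1_rtx) first substitutes to
   (plus:SI (const_int 1) (const_int 0)) and then simplifies the result to
   (const_int 1).  */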
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is -1 when truncated
867 to mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
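/* A minimal illustration of the first rule above: truncating
   (zero_extend:DI (reg:SI x)) to SImode yields (reg:SI x) directly, while
   truncating it to HImode yields (truncate:HI (reg:SI x)), which
   simplify_gen_unary may lower further to a lowpart subreg.  */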
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
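/* A minimal illustration: simplify_unary_operation (NEG, SImode,
   GEN_INT (5), SImode) is handled entirely by the constant path and yields
   (const_int -5); non-constant operands fall through to
   simplify_unary_operation_1.  */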
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
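/* A minimal illustration, assuming the usual IEEE binary formats:
   (float:DF (reg:SI x)) is always exact, since DFmode's 53-bit significand
   can hold any 32-bit integer; for (float:SF (reg:DI x)) the answer depends
   on nonzero_bits and num_sign_bit_copies showing that the value fits in
   SFmode's 24-bit significand.  */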
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
931 switch (code)
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
998 if (partial_subreg_p (op)
999 && subreg_lowpart_p (op)
1000 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1001 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1003 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1004 rtx x;
1006 x = gen_rtx_ROTATE (inner_mode,
1007 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1008 inner_mode),
1009 XEXP (SUBREG_REG (op), 1));
1010 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1011 if (temp)
1012 return temp;
1015 /* Apply De Morgan's laws to reduce number of patterns for machines
1016 with negating logical insns (and-not, nand, etc.). If result has
1017 only one NOT, put it first, since that is how the patterns are
1018 coded. */
1019 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1021 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1022 machine_mode op_mode;
1024 op_mode = GET_MODE (in1);
1025 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1027 op_mode = GET_MODE (in2);
1028 if (op_mode == VOIDmode)
1029 op_mode = mode;
1030 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1032 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1033 std::swap (in1, in2);
1035 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1036 mode, in1, in2);
1039 /* (not (bswap x)) -> (bswap (not x)). */
1040 if (GET_CODE (op) == BSWAP)
1042 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1043 return simplify_gen_unary (BSWAP, mode, x, mode);
1045 break;
1047 case NEG:
1048 /* (neg (neg X)) == X. */
1049 if (GET_CODE (op) == NEG)
1050 return XEXP (op, 0);
1052 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1053 If the comparison is not reversible, use
1054 x ? y : (neg y). */
1055 if (GET_CODE (op) == IF_THEN_ELSE)
1057 rtx cond = XEXP (op, 0);
1058 rtx true_rtx = XEXP (op, 1);
1059 rtx false_rtx = XEXP (op, 2);
1061 if ((GET_CODE (true_rtx) == NEG
1062 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1063 || (GET_CODE (false_rtx) == NEG
1064 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1066 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1067 temp = reversed_comparison (cond, mode);
1068 else
1070 temp = cond;
1071 std::swap (true_rtx, false_rtx);
1073 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1074 mode, temp, true_rtx, false_rtx);
1078 /* (neg (plus X 1)) can become (not X). */
1079 if (GET_CODE (op) == PLUS
1080 && XEXP (op, 1) == const1_rtx)
1081 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1083 /* Similarly, (neg (not X)) is (plus X 1). */
1084 if (GET_CODE (op) == NOT)
1085 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1086 CONST1_RTX (mode));
1088 /* (neg (minus X Y)) can become (minus Y X). This transformation
1089 isn't safe for modes with signed zeros, since if X and Y are
1090 both +0, (minus Y X) is the same as (minus X Y). If the
1091 rounding mode is towards +infinity (or -infinity) then the two
1092 expressions will be rounded differently. */
1093 if (GET_CODE (op) == MINUS
1094 && !HONOR_SIGNED_ZEROS (mode)
1095 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1096 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1098 if (GET_CODE (op) == PLUS
1099 && !HONOR_SIGNED_ZEROS (mode)
1100 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1102 /* (neg (plus A C)) is simplified to (minus -C A). */
1103 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1104 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1106 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1107 if (temp)
1108 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1111 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1112 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1113 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1116 /* (neg (mult A B)) becomes (mult A (neg B)).
1117 This works even for floating-point values. */
1118 if (GET_CODE (op) == MULT
1119 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1121 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1122 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1125 /* NEG commutes with ASHIFT since it is multiplication. Only do
1126 this if we can then eliminate the NEG (e.g., if the operand
1127 is a constant). */
1128 if (GET_CODE (op) == ASHIFT)
1130 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1131 if (temp)
1132 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1135 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1136 C is equal to the width of MODE minus 1. */
1137 if (GET_CODE (op) == ASHIFTRT
1138 && CONST_INT_P (XEXP (op, 1))
1139 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1140 return simplify_gen_binary (LSHIFTRT, mode,
1141 XEXP (op, 0), XEXP (op, 1));
1143 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1144 C is equal to the width of MODE minus 1. */
1145 if (GET_CODE (op) == LSHIFTRT
1146 && CONST_INT_P (XEXP (op, 1))
1147 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1148 return simplify_gen_binary (ASHIFTRT, mode,
1149 XEXP (op, 0), XEXP (op, 1));
1151 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1152 if (GET_CODE (op) == XOR
1153 && XEXP (op, 1) == const1_rtx
1154 && nonzero_bits (XEXP (op, 0), mode) == 1)
1155 return plus_constant (mode, XEXP (op, 0), -1);
1157 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1158 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1159 if (GET_CODE (op) == LT
1160 && XEXP (op, 1) == const0_rtx
1161 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1163 int_mode = as_a <scalar_int_mode> (mode);
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (int_mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (int_mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1175 else if (STORE_FLAG_VALUE == -1)
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (int_mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (int_mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1186 break;
1188 case TRUNCATE:
1189 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1190 with the umulXi3_highpart patterns. */
1191 if (GET_CODE (op) == LSHIFTRT
1192 && GET_CODE (XEXP (op, 0)) == MULT)
1193 break;
1195 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1197 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1199 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1200 if (temp)
1201 return temp;
1203 /* We can't handle truncation to a partial integer mode here
1204 because we don't know the real bitsize of the partial
1205 integer mode. */
1206 break;
1209 if (GET_MODE (op) != VOIDmode)
1211 temp = simplify_truncation (mode, op, GET_MODE (op));
1212 if (temp)
1213 return temp;
1216 /* If we know that the value is already truncated, we can
1217 replace the TRUNCATE with a SUBREG. */
1218 if (GET_MODE_NUNITS (mode) == 1
1219 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1220 || truncated_to_mode (mode, op)))
1222 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1223 if (temp)
1224 return temp;
1227 /* A truncate of a comparison can be replaced with a subreg if
1228 STORE_FLAG_VALUE permits. This is like the previous test,
1229 but it works even if the comparison is done in a mode larger
1230 than HOST_BITS_PER_WIDE_INT. */
1231 if (HWI_COMPUTABLE_MODE_P (mode)
1232 && COMPARISON_P (op)
1233 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1235 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1236 if (temp)
1237 return temp;
1240 /* A truncate of a memory is just loading the low part of the memory
1241 if we are not changing the meaning of the address. */
1242 if (GET_CODE (op) == MEM
1243 && !VECTOR_MODE_P (mode)
1244 && !MEM_VOLATILE_P (op)
1245 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1252 break;
1254 case FLOAT_TRUNCATE:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1259 if (GET_CODE (op) == FLOAT_EXTEND
1260 && GET_MODE (XEXP (op, 0)) == mode)
1261 return XEXP (op, 0);
1263 /* (float_truncate:SF (float_truncate:DF foo:XF))
1264 = (float_truncate:SF foo:XF).
1265 This may eliminate double rounding, so it is unsafe.
1267 (float_truncate:SF (float_extend:XF foo:DF))
1268 = (float_truncate:SF foo:DF).
1270 (float_truncate:DF (float_extend:XF foo:SF))
1271 = (float_extend:DF foo:SF). */
1272 if ((GET_CODE (op) == FLOAT_TRUNCATE
1273 && flag_unsafe_math_optimizations)
1274 || GET_CODE (op) == FLOAT_EXTEND)
1275 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1276 0)))
1277 > GET_MODE_SIZE (mode)
1278 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1279 mode,
1280 XEXP (op, 0), mode);
1282 /* (float_truncate (float x)) is (float x) */
1283 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1284 && (flag_unsafe_math_optimizations
1285 || exact_int_to_float_conversion_p (op)))
1286 return simplify_gen_unary (GET_CODE (op), mode,
1287 XEXP (op, 0),
1288 GET_MODE (XEXP (op, 0)));
1290 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1291 (OP:SF foo:SF) if OP is NEG or ABS. */
1292 if ((GET_CODE (op) == ABS
1293 || GET_CODE (op) == NEG)
1294 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1295 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1296 return simplify_gen_unary (GET_CODE (op), mode,
1297 XEXP (XEXP (op, 0), 0), mode);
1299 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1300 is (float_truncate:SF x). */
1301 if (GET_CODE (op) == SUBREG
1302 && subreg_lowpart_p (op)
1303 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1304 return SUBREG_REG (op);
1305 break;
1307 case FLOAT_EXTEND:
1308 if (DECIMAL_FLOAT_MODE_P (mode))
1309 break;
1311 /* (float_extend (float_extend x)) is (float_extend x)
1313 (float_extend (float x)) is (float x) assuming that double
1314 rounding can't happen. */
1316 if (GET_CODE (op) == FLOAT_EXTEND
1317 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1318 && exact_int_to_float_conversion_p (op)))
1319 return simplify_gen_unary (GET_CODE (op), mode,
1320 XEXP (op, 0),
1321 GET_MODE (XEXP (op, 0)));
1323 break;
1325 case ABS:
1326 /* (abs (neg <foo>)) -> (abs <foo>) */
1327 if (GET_CODE (op) == NEG)
1328 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1329 GET_MODE (XEXP (op, 0)));
1331 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1332 do nothing. */
1333 if (GET_MODE (op) == VOIDmode)
1334 break;
1336 /* If operand is something known to be positive, ignore the ABS. */
1337 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1338 || val_signbit_known_clear_p (GET_MODE (op),
1339 nonzero_bits (op, GET_MODE (op))))
1340 return op;
1342 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1343 if (is_a <scalar_int_mode> (mode, &int_mode)
1344 && (num_sign_bit_copies (op, int_mode)
1345 == GET_MODE_PRECISION (int_mode)))
1346 return gen_rtx_NEG (int_mode, op);
1348 break;
1350 case FFS:
1351 /* (ffs (*_extend <X>)) = (ffs <X>) */
1352 if (GET_CODE (op) == SIGN_EXTEND
1353 || GET_CODE (op) == ZERO_EXTEND)
1354 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1355 GET_MODE (XEXP (op, 0)));
1356 break;
1358 case POPCOUNT:
1359 switch (GET_CODE (op))
1361 case BSWAP:
1362 case ZERO_EXTEND:
1363 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1364 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1365 GET_MODE (XEXP (op, 0)));
1367 case ROTATE:
1368 case ROTATERT:
1369 /* Rotations don't affect popcount. */
1370 if (!side_effects_p (XEXP (op, 1)))
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1373 break;
1375 default:
1376 break;
1378 break;
1380 case PARITY:
1381 switch (GET_CODE (op))
1383 case NOT:
1384 case BSWAP:
1385 case ZERO_EXTEND:
1386 case SIGN_EXTEND:
1387 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1388 GET_MODE (XEXP (op, 0)));
1390 case ROTATE:
1391 case ROTATERT:
1392 /* Rotations don't affect parity. */
1393 if (!side_effects_p (XEXP (op, 1)))
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1396 break;
1398 default:
1399 break;
1401 break;
1403 case BSWAP:
1404 /* (bswap (bswap x)) -> x. */
1405 if (GET_CODE (op) == BSWAP)
1406 return XEXP (op, 0);
1407 break;
1409 case FLOAT:
1410 /* (float (sign_extend <X>)) = (float <X>). */
1411 if (GET_CODE (op) == SIGN_EXTEND)
1412 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1413 GET_MODE (XEXP (op, 0)));
1414 break;
1416 case SIGN_EXTEND:
1417 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1418 becomes just the MINUS if its mode is MODE. This allows
1419 folding switch statements on machines using casesi (such as
1420 the VAX). */
1421 if (GET_CODE (op) == TRUNCATE
1422 && GET_MODE (XEXP (op, 0)) == mode
1423 && GET_CODE (XEXP (op, 0)) == MINUS
1424 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1425 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1426 return XEXP (op, 0);
1428 /* Extending a widening multiplication should be canonicalized to
1429 a wider widening multiplication. */
1430 if (GET_CODE (op) == MULT)
1432 rtx lhs = XEXP (op, 0);
1433 rtx rhs = XEXP (op, 1);
1434 enum rtx_code lcode = GET_CODE (lhs);
1435 enum rtx_code rcode = GET_CODE (rhs);
1437 /* Widening multiplies usually extend both operands, but sometimes
1438 they use a shift to extract a portion of a register. */
1439 if ((lcode == SIGN_EXTEND
1440 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1441 && (rcode == SIGN_EXTEND
1442 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1444 machine_mode lmode = GET_MODE (lhs);
1445 machine_mode rmode = GET_MODE (rhs);
1446 int bits;
1448 if (lcode == ASHIFTRT)
1449 /* Number of bits not shifted off the end. */
1450 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1451 else /* lcode == SIGN_EXTEND */
1452 /* Size of inner mode. */
1453 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1455 if (rcode == ASHIFTRT)
1456 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1457 else /* rcode == SIGN_EXTEND */
1458 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1460 /* We can only widen multiplies if the result is mathematically
1461 equivalent. I.e. if overflow was impossible. */
1462 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1463 return simplify_gen_binary
1464 (MULT, mode,
1465 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1466 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1470 /* Check for a sign extension of a subreg of a promoted
1471 variable, where the promotion is sign-extended, and the
1472 target mode is the same as the variable's promotion. */
1473 if (GET_CODE (op) == SUBREG
1474 && SUBREG_PROMOTED_VAR_P (op)
1475 && SUBREG_PROMOTED_SIGNED_P (op)
1476 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1478 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1479 if (temp)
1480 return temp;
1483 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1484 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1485 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1487 gcc_assert (GET_MODE_PRECISION (mode)
1488 > GET_MODE_PRECISION (GET_MODE (op)));
1489 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1490 GET_MODE (XEXP (op, 0)));
1493 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1494 is (sign_extend:M (subreg:O <X>)) if there is mode with
1495 GET_MODE_BITSIZE (N) - I bits.
1496 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1497 is similarly (zero_extend:M (subreg:O <X>)). */
1498 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1499 && GET_CODE (XEXP (op, 0)) == ASHIFT
1500 && is_a <scalar_int_mode> (mode, &int_mode)
1501 && CONST_INT_P (XEXP (op, 1))
1502 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1503 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1504 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1506 scalar_int_mode tmode;
1507 gcc_assert (GET_MODE_BITSIZE (int_mode)
1508 > GET_MODE_BITSIZE (op_mode));
1509 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1510 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1512 rtx inner =
1513 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1514 if (inner)
1515 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1516 ? SIGN_EXTEND : ZERO_EXTEND,
1517 int_mode, inner, tmode);
1521 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1522 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1523 if (GET_CODE (op) == LSHIFTRT
1524 && CONST_INT_P (XEXP (op, 1))
1525 && XEXP (op, 1) != const0_rtx)
1526 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1528 #if defined(POINTERS_EXTEND_UNSIGNED)
1529 /* As we do not know which address space the pointer is referring to,
1530 we can do this only if the target does not support different pointer
1531 or address modes depending on the address space. */
1532 if (target_default_pointer_address_modes_p ()
1533 && ! POINTERS_EXTEND_UNSIGNED
1534 && mode == Pmode && GET_MODE (op) == ptr_mode
1535 && (CONSTANT_P (op)
1536 || (GET_CODE (op) == SUBREG
1537 && REG_P (SUBREG_REG (op))
1538 && REG_POINTER (SUBREG_REG (op))
1539 && GET_MODE (SUBREG_REG (op)) == Pmode))
1540 && !targetm.have_ptr_extend ())
1542 temp
1543 = convert_memory_address_addr_space_1 (Pmode, op,
1544 ADDR_SPACE_GENERIC, false,
1545 true);
1546 if (temp)
1547 return temp;
1549 #endif
1550 break;
1552 case ZERO_EXTEND:
1553 /* Check for a zero extension of a subreg of a promoted
1554 variable, where the promotion is zero-extended, and the
1555 target mode is the same as the variable's promotion. */
1556 if (GET_CODE (op) == SUBREG
1557 && SUBREG_PROMOTED_VAR_P (op)
1558 && SUBREG_PROMOTED_UNSIGNED_P (op)
1559 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1561 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1562 if (temp)
1563 return temp;
1566 /* Extending a widening multiplication should be canonicalized to
1567 a wider widening multiplication. */
1568 if (GET_CODE (op) == MULT)
1570 rtx lhs = XEXP (op, 0);
1571 rtx rhs = XEXP (op, 1);
1572 enum rtx_code lcode = GET_CODE (lhs);
1573 enum rtx_code rcode = GET_CODE (rhs);
1575 /* Widening multiplies usually extend both operands, but sometimes
1576 they use a shift to extract a portion of a register. */
1577 if ((lcode == ZERO_EXTEND
1578 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1579 && (rcode == ZERO_EXTEND
1580 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1582 machine_mode lmode = GET_MODE (lhs);
1583 machine_mode rmode = GET_MODE (rhs);
1584 int bits;
1586 if (lcode == LSHIFTRT)
1587 /* Number of bits not shifted off the end. */
1588 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1589 else /* lcode == ZERO_EXTEND */
1590 /* Size of inner mode. */
1591 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1593 if (rcode == LSHIFTRT)
1594 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1595 else /* rcode == ZERO_EXTEND */
1596 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1598 /* We can only widen multiplies if the result is mathematically
1599 equivalent. I.e. if overflow was impossible. */
1600 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1601 return simplify_gen_binary
1602 (MULT, mode,
1603 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1604 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1608 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1609 if (GET_CODE (op) == ZERO_EXTEND)
1610 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1611 GET_MODE (XEXP (op, 0)));
1613 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1614 is (zero_extend:M (subreg:O <X>)) if there is mode with
1615 GET_MODE_PRECISION (N) - I bits. */
1616 if (GET_CODE (op) == LSHIFTRT
1617 && GET_CODE (XEXP (op, 0)) == ASHIFT
1618 && is_a <scalar_int_mode> (mode, &int_mode)
1619 && CONST_INT_P (XEXP (op, 1))
1620 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1621 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1622 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1624 scalar_int_mode tmode;
1625 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1626 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1628 rtx inner =
1629 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1630 if (inner)
1631 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1632 inner, tmode);
1636 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1637 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1638 of mode N. E.g.
1639 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1640 (and:SI (reg:SI) (const_int 63)). */
1641 if (partial_subreg_p (op)
1642 && is_a <scalar_int_mode> (mode, &int_mode)
1643 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1644 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1645 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1646 && subreg_lowpart_p (op)
1647 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1648 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1650 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1651 return SUBREG_REG (op);
1652 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1653 op0_mode);
1656 #if defined(POINTERS_EXTEND_UNSIGNED)
1657 /* As we do not know which address space the pointer is referring to,
1658 we can do this only if the target does not support different pointer
1659 or address modes depending on the address space. */
1660 if (target_default_pointer_address_modes_p ()
1661 && POINTERS_EXTEND_UNSIGNED > 0
1662 && mode == Pmode && GET_MODE (op) == ptr_mode
1663 && (CONSTANT_P (op)
1664 || (GET_CODE (op) == SUBREG
1665 && REG_P (SUBREG_REG (op))
1666 && REG_POINTER (SUBREG_REG (op))
1667 && GET_MODE (SUBREG_REG (op)) == Pmode))
1668 && !targetm.have_ptr_extend ())
1670 temp
1671 = convert_memory_address_addr_space_1 (Pmode, op,
1672 ADDR_SPACE_GENERIC, false,
1673 true);
1674 if (temp)
1675 return temp;
1677 #endif
1678 break;
1680 default:
1681 break;
1684 return 0;
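/* A minimal illustration of the non-constant folds above: with
   OP = (neg:SI (reg:SI x)), code NEG returns (reg:SI x); with
   OP = (not:SI (reg:SI x)), code NEG returns
   (plus:SI (reg:SI x) (const_int 1)).  */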
1687 /* Try to compute the value of a unary operation CODE whose output mode is to
1688 be MODE with input operand OP whose mode was originally OP_MODE.
1689 Return zero if the value cannot be computed. */
1691 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1692 rtx op, machine_mode op_mode)
1694 scalar_int_mode result_mode;
1696 if (code == VEC_DUPLICATE)
1698 gcc_assert (VECTOR_MODE_P (mode));
1699 if (GET_MODE (op) != VOIDmode)
1701 if (!VECTOR_MODE_P (GET_MODE (op)))
1702 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1703 else
1704 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1705 (GET_MODE (op)));
1707 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1708 || GET_CODE (op) == CONST_VECTOR)
1710 int elt_size = GET_MODE_UNIT_SIZE (mode);
1711 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1712 rtvec v = rtvec_alloc (n_elts);
1713 unsigned int i;
1715 if (GET_CODE (op) != CONST_VECTOR)
1716 for (i = 0; i < n_elts; i++)
1717 RTVEC_ELT (v, i) = op;
1718 else
1720 machine_mode inmode = GET_MODE (op);
1721 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1722 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1724 gcc_assert (in_n_elts < n_elts);
1725 gcc_assert ((n_elts % in_n_elts) == 0);
1726 for (i = 0; i < n_elts; i++)
1727 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1729 return gen_rtx_CONST_VECTOR (mode, v);
1733 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1735 int elt_size = GET_MODE_UNIT_SIZE (mode);
1736 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1737 machine_mode opmode = GET_MODE (op);
1738 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1739 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1740 rtvec v = rtvec_alloc (n_elts);
1741 unsigned int i;
1743 gcc_assert (op_n_elts == n_elts);
1744 for (i = 0; i < n_elts; i++)
1746 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1747 CONST_VECTOR_ELT (op, i),
1748 GET_MODE_INNER (opmode));
1749 if (!x)
1750 return 0;
1751 RTVEC_ELT (v, i) = x;
1753 return gen_rtx_CONST_VECTOR (mode, v);
1756 /* The order of these tests is critical so that, for example, we don't
1757 check the wrong mode (input vs. output) for a conversion operation,
1758 such as FIX. At some point, this should be simplified. */
1760 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1762 REAL_VALUE_TYPE d;
1764 if (op_mode == VOIDmode)
1766 /* CONST_INTs have VOIDmode as their mode. We assume that all
1767 the bits of the constant are significant, though this is a
1768 dangerous assumption, as CONST_INTs are often created and
1769 used with garbage in the bits outside of the precision of
1770 the implied mode of the const_int. */
1771 op_mode = MAX_MODE_INT;
1774 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1776 /* Avoid the folding if flag_signaling_nans is on and
1777 operand is a signaling NaN. */
1778 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1779 return 0;
1781 d = real_value_truncate (mode, d);
1782 return const_double_from_real_value (d, mode);
1784 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1786 REAL_VALUE_TYPE d;
1788 if (op_mode == VOIDmode)
1790 /* A CONST_INT has VOIDmode as its mode. We assume that all
1791 the bits of the constant are significant; this is a
1792 dangerous assumption, though, since CONST_INTs are often
1793 created and used with garbage in the bits outside the
1794 precision of their implied mode. */
1795 op_mode = MAX_MODE_INT;
1798 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1800 /* Avoid the folding if flag_signaling_nans is on and
1801 the operand is a signaling NaN. */
1802 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1803 return 0;
1805 d = real_value_truncate (mode, d);
1806 return const_double_from_real_value (d, mode);
1809 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1811 unsigned int width = GET_MODE_PRECISION (result_mode);
1812 wide_int result;
1813 scalar_int_mode imode = (op_mode == VOIDmode
1814 ? result_mode
1815 : as_a <scalar_int_mode> (op_mode));
1816 rtx_mode_t op0 = rtx_mode_t (op, imode);
1817 int int_value;
1819 #if TARGET_SUPPORTS_WIDE_INT == 0
1820 /* This assert keeps the simplification from producing a result
1821 that cannot be represented in a CONST_DOUBLE, but a lot of
1822 upstream callers expect that this function never fails to
1823 simplify something, so if you added this to the test above
1824 the code would die later anyway. If this assert fires,
1825 you just need to make the port support wide int. */
1826 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1827 #endif
1829 switch (code)
1831 case NOT:
1832 result = wi::bit_not (op0);
1833 break;
1835 case NEG:
1836 result = wi::neg (op0);
1837 break;
1839 case ABS:
1840 result = wi::abs (op0);
1841 break;
1843 case FFS:
1844 result = wi::shwi (wi::ffs (op0), result_mode);
1845 break;
1847 case CLZ:
1848 if (wi::ne_p (op0, 0))
1849 int_value = wi::clz (op0);
1850 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1851 int_value = GET_MODE_PRECISION (imode);
1852 result = wi::shwi (int_value, result_mode);
1853 break;
1855 case CLRSB:
1856 result = wi::shwi (wi::clrsb (op0), result_mode);
1857 break;
1859 case CTZ:
1860 if (wi::ne_p (op0, 0))
1861 int_value = wi::ctz (op0);
1862 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1863 int_value = GET_MODE_PRECISION (imode);
1864 result = wi::shwi (int_value, result_mode);
1865 break;
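/* E.g. (clz:SI (const_int 4)) folds to (const_int 29) and
   (ctz:SI (const_int 4)) to (const_int 2); at zero, the value from
   C[LT]Z_DEFINED_VALUE_AT_ZERO is used, falling back to the mode
   precision when the target leaves it undefined. */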
1867 case POPCOUNT:
1868 result = wi::shwi (wi::popcount (op0), result_mode);
1869 break;
1871 case PARITY:
1872 result = wi::shwi (wi::parity (op0), result_mode);
1873 break;
1875 case BSWAP:
1876 result = wide_int (op0).bswap ();
1877 break;
1879 case TRUNCATE:
1880 case ZERO_EXTEND:
1881 result = wide_int::from (op0, width, UNSIGNED);
1882 break;
1884 case SIGN_EXTEND:
1885 result = wide_int::from (op0, width, SIGNED);
1886 break;
1888 case SQRT:
1889 default:
1890 return 0;
1893 return immed_wide_int_const (result, result_mode);
1896 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1897 && SCALAR_FLOAT_MODE_P (mode)
1898 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1900 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1901 switch (code)
1903 case SQRT:
1904 return 0;
1905 case ABS:
1906 d = real_value_abs (&d);
1907 break;
1908 case NEG:
1909 d = real_value_negate (&d);
1910 break;
1911 case FLOAT_TRUNCATE:
1912 /* Don't perform the operation if flag_signaling_nans is on
1913 and the operand is a signaling NaN. */
1914 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1915 return NULL_RTX;
1916 d = real_value_truncate (mode, d);
1917 break;
1918 case FLOAT_EXTEND:
1919 /* Don't perform the operation if flag_signaling_nans is on
1920 and the operand is a signaling NaN. */
1921 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1922 return NULL_RTX;
1923 /* All this does is change the mode, unless changing
1924 mode class. */
1925 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1926 real_convert (&d, mode, &d);
1927 break;
1928 case FIX:
1929 /* Don't perform the operation if flag_signaling_nans is on
1930 and the operand is a signaling NaN. */
1931 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1932 return NULL_RTX;
1933 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1934 break;
1935 case NOT:
1937 long tmp[4];
1938 int i;
1940 real_to_target (tmp, &d, GET_MODE (op));
1941 for (i = 0; i < 4; i++)
1942 tmp[i] = ~tmp[i];
1943 real_from_target (&d, tmp, mode);
1944 break;
1946 default:
1947 gcc_unreachable ();
1949 return const_double_from_real_value (d, mode);
1951 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1952 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1953 && is_int_mode (mode, &result_mode))
1955 unsigned int width = GET_MODE_PRECISION (result_mode);
1956 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1957 operators are intentionally left unspecified (to ease implementation
1958 by target backends), for consistency, this routine implements the
1959 same semantics for constant folding as used by the middle-end. */
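/* Concretely, out-of-range values saturate: e.g. FIX of the DFmode
   constant 1.0e10 to SImode folds to (const_int 2147483647), and NaN
   inputs fold to (const_int 0). */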
1961 /* This was formerly used only for non-IEEE float.
1962 eggert@twinsun.com says it is safe for IEEE also. */
1963 REAL_VALUE_TYPE t;
1964 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1965 wide_int wmax, wmin;
1966 /* This is part of the interface to real_to_integer, but we check
1967 things before making this call. */
1968 bool fail;
1970 switch (code)
1972 case FIX:
1973 if (REAL_VALUE_ISNAN (*x))
1974 return const0_rtx;
1976 /* Test against the signed upper bound. */
1977 wmax = wi::max_value (width, SIGNED);
1978 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1979 if (real_less (&t, x))
1980 return immed_wide_int_const (wmax, mode);
1982 /* Test against the signed lower bound. */
1983 wmin = wi::min_value (width, SIGNED);
1984 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1985 if (real_less (x, &t))
1986 return immed_wide_int_const (wmin, mode);
1988 return immed_wide_int_const (real_to_integer (x, &fail, width),
1989 mode);
1991 case UNSIGNED_FIX:
1992 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1993 return const0_rtx;
1995 /* Test against the unsigned upper bound. */
1996 wmax = wi::max_value (width, UNSIGNED);
1997 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1998 if (real_less (&t, x))
1999 return immed_wide_int_const (wmax, mode);
2001 return immed_wide_int_const (real_to_integer (x, &fail, width),
2002 mode);
2004 default:
2005 gcc_unreachable ();
2009 return NULL_RTX;
2012 /* Subroutine of simplify_binary_operation to simplify a binary operation
2013 CODE that can commute with byte swapping, with result mode MODE and
2014 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2015 Return zero if no simplification or canonicalization is possible. */
2017 static rtx
2018 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2019 rtx op0, rtx op1)
2021 rtx tem;
2023 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
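/* E.g. in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))). */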
2024 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2026 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2027 simplify_gen_unary (BSWAP, mode, op1, mode));
2028 return simplify_gen_unary (BSWAP, mode, tem, mode);
2031 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2032 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2034 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2035 return simplify_gen_unary (BSWAP, mode, tem, mode);
2038 return NULL_RTX;
2041 /* Subroutine of simplify_binary_operation to simplify a commutative,
2042 associative binary operation CODE with result mode MODE, operating
2043 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2044 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2045 canonicalization is possible. */
2047 static rtx
2048 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2049 rtx op0, rtx op1)
2051 rtx tem;
2053 /* Linearize the operator to the left. */
2054 if (GET_CODE (op1) == code)
2056 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2057 if (GET_CODE (op0) == code)
2059 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2060 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2063 /* "a op (b op c)" becomes "(b op c) op a". */
2064 if (! swap_commutative_operands_p (op1, op0))
2065 return simplify_gen_binary (code, mode, op1, op0);
2067 std::swap (op0, op1);
2070 if (GET_CODE (op0) == code)
2072 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
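/* E.g. (plus (plus x (const_int 7)) y) becomes
   (plus (plus x y) (const_int 7)). */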
2073 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2075 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2076 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2079 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2080 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2081 if (tem != 0)
2082 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2084 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2085 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2086 if (tem != 0)
2087 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2090 return 0;
2094 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2095 and OP1. Return 0 if no simplification is possible.
2097 Don't use this for relational operations such as EQ or LT.
2098 Use simplify_relational_operation instead. */
2100 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2101 rtx op0, rtx op1)
2103 rtx trueop0, trueop1;
2104 rtx tem;
2106 /* Relational operations don't work here. We must know the mode
2107 of the operands in order to do the comparison correctly.
2108 Assuming a full word can give incorrect results.
2109 Consider comparing 128 with -128 in QImode. */
2110 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2111 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2113 /* Make sure the constant is second. */
2114 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2115 && swap_commutative_operands_p (op0, op1))
2116 std::swap (op0, op1);
2118 trueop0 = avoid_constant_pool_reference (op0);
2119 trueop1 = avoid_constant_pool_reference (op1);
2121 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2122 if (tem)
2123 return tem;
2124 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2126 if (tem)
2127 return tem;
2129 /* If the above steps did not result in a simplification and op0 or op1
2130 were constant pool references, use the referenced constants directly. */
2131 if (trueop0 != op0 || trueop1 != op1)
2132 return simplify_gen_binary (code, mode, trueop0, trueop1);
2134 return NULL_RTX;
2137 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2138 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2139 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2140 actual constants. */
2142 static rtx
2143 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2144 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2146 rtx tem, reversed, opleft, opright;
2147 HOST_WIDE_INT val;
2148 unsigned int width = GET_MODE_PRECISION (mode);
2149 scalar_int_mode int_mode, inner_mode;
2151 /* Even if we can't compute a constant result,
2152 there are some cases worth simplifying. */
2154 switch (code)
2156 case PLUS:
2157 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2158 when x is NaN, infinite, or finite and nonzero. They aren't
2159 when x is -0 and the rounding mode is not towards -infinity,
2160 since (-0) + 0 is then 0. */
2161 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2162 return op0;
2164 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2165 transformations are safe even for IEEE. */
2166 if (GET_CODE (op0) == NEG)
2167 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2168 else if (GET_CODE (op1) == NEG)
2169 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2171 /* (~a) + 1 -> -a */
2172 if (INTEGRAL_MODE_P (mode)
2173 && GET_CODE (op0) == NOT
2174 && trueop1 == const1_rtx)
2175 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2177 /* Handle both-operands-constant cases. We can only add
2178 CONST_INTs to constants since the sum of relocatable symbols
2179 can't be handled by most assemblers. Don't add CONST_INT
2180 to CONST_INT since overflow won't be computed properly if wider
2181 than HOST_BITS_PER_WIDE_INT. */
2183 if ((GET_CODE (op0) == CONST
2184 || GET_CODE (op0) == SYMBOL_REF
2185 || GET_CODE (op0) == LABEL_REF)
2186 && CONST_INT_P (op1))
2187 return plus_constant (mode, op0, INTVAL (op1));
2188 else if ((GET_CODE (op1) == CONST
2189 || GET_CODE (op1) == SYMBOL_REF
2190 || GET_CODE (op1) == LABEL_REF)
2191 && CONST_INT_P (op0))
2192 return plus_constant (mode, op1, INTVAL (op0));
2194 /* See if this is something like X * C - X or vice versa or
2195 if the multiplication is written as a shift. If so, we can
2196 distribute and make a new multiply, shift, or maybe just
2197 have X (if C is 2 in the example above). But don't make
2198 something more expensive than we had before. */
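/* E.g. (plus (mult x (const_int 3)) x) can become (mult x (const_int 4)),
   and (plus (ashift x (const_int 2)) x) can become (mult x (const_int 5)),
   provided the result is not judged more expensive. */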
2200 if (is_a <scalar_int_mode> (mode, &int_mode))
2202 rtx lhs = op0, rhs = op1;
2204 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2205 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2207 if (GET_CODE (lhs) == NEG)
2209 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2210 lhs = XEXP (lhs, 0);
2212 else if (GET_CODE (lhs) == MULT
2213 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2215 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2216 lhs = XEXP (lhs, 0);
2218 else if (GET_CODE (lhs) == ASHIFT
2219 && CONST_INT_P (XEXP (lhs, 1))
2220 && INTVAL (XEXP (lhs, 1)) >= 0
2221 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2223 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2224 GET_MODE_PRECISION (int_mode));
2225 lhs = XEXP (lhs, 0);
2228 if (GET_CODE (rhs) == NEG)
2230 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2231 rhs = XEXP (rhs, 0);
2233 else if (GET_CODE (rhs) == MULT
2234 && CONST_INT_P (XEXP (rhs, 1)))
2236 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2237 rhs = XEXP (rhs, 0);
2239 else if (GET_CODE (rhs) == ASHIFT
2240 && CONST_INT_P (XEXP (rhs, 1))
2241 && INTVAL (XEXP (rhs, 1)) >= 0
2242 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2244 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2245 GET_MODE_PRECISION (int_mode));
2246 rhs = XEXP (rhs, 0);
2249 if (rtx_equal_p (lhs, rhs))
2251 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2252 rtx coeff;
2253 bool speed = optimize_function_for_speed_p (cfun);
2255 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2257 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2258 return (set_src_cost (tem, int_mode, speed)
2259 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2263 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2264 if (CONST_SCALAR_INT_P (op1)
2265 && GET_CODE (op0) == XOR
2266 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2267 && mode_signbit_p (mode, op1))
2268 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2269 simplify_gen_binary (XOR, mode, op1,
2270 XEXP (op0, 1)));
2272 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2273 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2274 && GET_CODE (op0) == MULT
2275 && GET_CODE (XEXP (op0, 0)) == NEG)
2277 rtx in1, in2;
2279 in1 = XEXP (XEXP (op0, 0), 0);
2280 in2 = XEXP (op0, 1);
2281 return simplify_gen_binary (MINUS, mode, op1,
2282 simplify_gen_binary (MULT, mode,
2283 in1, in2));
2286 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2287 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2288 is 1. */
2289 if (COMPARISON_P (op0)
2290 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2291 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2292 && (reversed = reversed_comparison (op0, mode)))
2293 return
2294 simplify_gen_unary (NEG, mode, reversed, mode);
2296 /* If one of the operands is a PLUS or a MINUS, see if we can
2297 simplify this by the associative law.
2298 Don't use the associative law for floating point.
2299 The inaccuracy makes it nonassociative,
2300 and subtle programs can break if operations are associated. */
2302 if (INTEGRAL_MODE_P (mode)
2303 && (plus_minus_operand_p (op0)
2304 || plus_minus_operand_p (op1))
2305 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2306 return tem;
2308 /* Reassociate floating point addition only when the user
2309 specifies associative math operations. */
2310 if (FLOAT_MODE_P (mode)
2311 && flag_associative_math)
2313 tem = simplify_associative_operation (code, mode, op0, op1);
2314 if (tem)
2315 return tem;
2317 break;
2319 case COMPARE:
2320 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2321 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2322 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2323 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2325 rtx xop00 = XEXP (op0, 0);
2326 rtx xop10 = XEXP (op1, 0);
2328 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2329 return xop00;
2331 if (REG_P (xop00) && REG_P (xop10)
2332 && REGNO (xop00) == REGNO (xop10)
2333 && GET_MODE (xop00) == mode
2334 && GET_MODE (xop10) == mode
2335 && GET_MODE_CLASS (mode) == MODE_CC)
2336 return xop00;
2338 break;
2340 case MINUS:
2341 /* We can't assume x-x is 0 even with non-IEEE floating point,
2342 but since it is zero except in very strange circumstances, we
2343 will treat it as zero with -ffinite-math-only. */
2344 if (rtx_equal_p (trueop0, trueop1)
2345 && ! side_effects_p (op0)
2346 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2347 return CONST0_RTX (mode);
2349 /* Change subtraction from zero into negation. (0 - x) is the
2350 same as -x when x is NaN, infinite, or finite and nonzero.
2351 But if the mode has signed zeros, and does not round towards
2352 -infinity, then 0 - 0 is 0, not -0. */
2353 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2354 return simplify_gen_unary (NEG, mode, op1, mode);
2356 /* (-1 - a) is ~a, unless the expression contains symbolic
2357 constants, in which case not retaining additions and
2358 subtractions could cause invalid assembly to be produced. */
2359 if (trueop0 == constm1_rtx
2360 && !contains_symbolic_reference_p (op1))
2361 return simplify_gen_unary (NOT, mode, op1, mode);
2363 /* Subtracting 0 has no effect unless the mode has signed zeros
2364 and supports rounding towards -infinity. In such a case,
2365 0 - 0 is -0. */
2366 if (!(HONOR_SIGNED_ZEROS (mode)
2367 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2368 && trueop1 == CONST0_RTX (mode))
2369 return op0;
2371 /* See if this is something like X * C - X or vice versa or
2372 if the multiplication is written as a shift. If so, we can
2373 distribute and make a new multiply, shift, or maybe just
2374 have X (if C is 2 in the example above). But don't make
2375 something more expensive than we had before. */
2377 if (is_a <scalar_int_mode> (mode, &int_mode))
2379 rtx lhs = op0, rhs = op1;
2381 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2382 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2384 if (GET_CODE (lhs) == NEG)
2386 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2387 lhs = XEXP (lhs, 0);
2389 else if (GET_CODE (lhs) == MULT
2390 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2392 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2393 lhs = XEXP (lhs, 0);
2395 else if (GET_CODE (lhs) == ASHIFT
2396 && CONST_INT_P (XEXP (lhs, 1))
2397 && INTVAL (XEXP (lhs, 1)) >= 0
2398 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2400 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2401 GET_MODE_PRECISION (int_mode));
2402 lhs = XEXP (lhs, 0);
2405 if (GET_CODE (rhs) == NEG)
2407 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2408 rhs = XEXP (rhs, 0);
2410 else if (GET_CODE (rhs) == MULT
2411 && CONST_INT_P (XEXP (rhs, 1)))
2413 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2414 rhs = XEXP (rhs, 0);
2416 else if (GET_CODE (rhs) == ASHIFT
2417 && CONST_INT_P (XEXP (rhs, 1))
2418 && INTVAL (XEXP (rhs, 1)) >= 0
2419 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2421 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2422 GET_MODE_PRECISION (int_mode));
2423 negcoeff1 = -negcoeff1;
2424 rhs = XEXP (rhs, 0);
2427 if (rtx_equal_p (lhs, rhs))
2429 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2430 rtx coeff;
2431 bool speed = optimize_function_for_speed_p (cfun);
2433 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2435 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2436 return (set_src_cost (tem, int_mode, speed)
2437 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2441 /* (a - (-b)) -> (a + b). True even for IEEE. */
2442 if (GET_CODE (op1) == NEG)
2443 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2445 /* (-x - c) may be simplified as (-c - x). */
2446 if (GET_CODE (op0) == NEG
2447 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2449 tem = simplify_unary_operation (NEG, mode, op1, mode);
2450 if (tem)
2451 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2454 /* Don't let a relocatable value get a negative coeff. */
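/* E.g. (minus x (const_int 5)) is rewritten as (plus x (const_int -5)). */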
2455 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2456 return simplify_gen_binary (PLUS, mode,
2457 op0,
2458 neg_const_int (mode, op1));
2460 /* (x - (x & y)) -> (x & ~y) */
2461 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2463 if (rtx_equal_p (op0, XEXP (op1, 0)))
2465 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2466 GET_MODE (XEXP (op1, 1)));
2467 return simplify_gen_binary (AND, mode, op0, tem);
2469 if (rtx_equal_p (op0, XEXP (op1, 1)))
2471 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2472 GET_MODE (XEXP (op1, 0)));
2473 return simplify_gen_binary (AND, mode, op0, tem);
2477 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2478 by reversing the comparison code if valid. */
2479 if (STORE_FLAG_VALUE == 1
2480 && trueop0 == const1_rtx
2481 && COMPARISON_P (op1)
2482 && (reversed = reversed_comparison (op1, mode)))
2483 return reversed;
2485 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2486 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2487 && GET_CODE (op1) == MULT
2488 && GET_CODE (XEXP (op1, 0)) == NEG)
2490 rtx in1, in2;
2492 in1 = XEXP (XEXP (op1, 0), 0);
2493 in2 = XEXP (op1, 1);
2494 return simplify_gen_binary (PLUS, mode,
2495 simplify_gen_binary (MULT, mode,
2496 in1, in2),
2497 op0);
2500 /* Canonicalize (minus (neg A) (mult B C)) to
2501 (minus (mult (neg B) C) A). */
2502 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2503 && GET_CODE (op1) == MULT
2504 && GET_CODE (op0) == NEG)
2506 rtx in1, in2;
2508 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2509 in2 = XEXP (op1, 1);
2510 return simplify_gen_binary (MINUS, mode,
2511 simplify_gen_binary (MULT, mode,
2512 in1, in2),
2513 XEXP (op0, 0));
2516 /* If one of the operands is a PLUS or a MINUS, see if we can
2517 simplify this by the associative law. This will, for example,
2518 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2519 Don't use the associative law for floating point.
2520 The inaccuracy makes it nonassociative,
2521 and subtle programs can break if operations are associated. */
2523 if (INTEGRAL_MODE_P (mode)
2524 && (plus_minus_operand_p (op0)
2525 || plus_minus_operand_p (op1))
2526 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2527 return tem;
2528 break;
2530 case MULT:
2531 if (trueop1 == constm1_rtx)
2532 return simplify_gen_unary (NEG, mode, op0, mode);
2534 if (GET_CODE (op0) == NEG)
2536 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2537 /* If op1 is a MULT as well and simplify_unary_operation
2538 just moved the NEG to the second operand, simplify_gen_binary
2539 below could, through simplify_associative_operation, move
2540 the NEG around again and recurse endlessly. */
2541 if (temp
2542 && GET_CODE (op1) == MULT
2543 && GET_CODE (temp) == MULT
2544 && XEXP (op1, 0) == XEXP (temp, 0)
2545 && GET_CODE (XEXP (temp, 1)) == NEG
2546 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2547 temp = NULL_RTX;
2548 if (temp)
2549 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2551 if (GET_CODE (op1) == NEG)
2553 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2554 /* If op0 is a MULT as well and simplify_unary_operation
2555 just moved the NEG to the second operand, simplify_gen_binary
2556 below could, through simplify_associative_operation, move
2557 the NEG around again and recurse endlessly. */
2558 if (temp
2559 && GET_CODE (op0) == MULT
2560 && GET_CODE (temp) == MULT
2561 && XEXP (op0, 0) == XEXP (temp, 0)
2562 && GET_CODE (XEXP (temp, 1)) == NEG
2563 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2564 temp = NULL_RTX;
2565 if (temp)
2566 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2569 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2570 x is NaN, since x * 0 is then also NaN. Nor is it valid
2571 when the mode has signed zeros, since multiplying a negative
2572 number by 0 will give -0, not 0. */
2573 if (!HONOR_NANS (mode)
2574 && !HONOR_SIGNED_ZEROS (mode)
2575 && trueop1 == CONST0_RTX (mode)
2576 && ! side_effects_p (op0))
2577 return op1;
2579 /* In IEEE floating point, x*1 is not equivalent to x for
2580 signalling NaNs. */
2581 if (!HONOR_SNANS (mode)
2582 && trueop1 == CONST1_RTX (mode))
2583 return op0;
2585 /* Convert multiply by constant power of two into shift. */
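/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)). */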
2586 if (CONST_SCALAR_INT_P (trueop1))
2588 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2589 if (val >= 0)
2590 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2593 /* x*2 is x+x and x*(-1) is -x */
2594 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2595 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2596 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2597 && GET_MODE (op0) == mode)
2599 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2601 if (real_equal (d1, &dconst2))
2602 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2604 if (!HONOR_SNANS (mode)
2605 && real_equal (d1, &dconstm1))
2606 return simplify_gen_unary (NEG, mode, op0, mode);
2609 /* Optimize -x * -x as x * x. */
2610 if (FLOAT_MODE_P (mode)
2611 && GET_CODE (op0) == NEG
2612 && GET_CODE (op1) == NEG
2613 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2614 && !side_effects_p (XEXP (op0, 0)))
2615 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2617 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2618 if (SCALAR_FLOAT_MODE_P (mode)
2619 && GET_CODE (op0) == ABS
2620 && GET_CODE (op1) == ABS
2621 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2622 && !side_effects_p (XEXP (op0, 0)))
2623 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2625 /* Reassociate multiplication, but for floating point MULTs
2626 only when the user specifies unsafe math optimizations. */
2627 if (! FLOAT_MODE_P (mode)
2628 || flag_unsafe_math_optimizations)
2630 tem = simplify_associative_operation (code, mode, op0, op1);
2631 if (tem)
2632 return tem;
2634 break;
2636 case IOR:
2637 if (trueop1 == CONST0_RTX (mode))
2638 return op0;
2639 if (INTEGRAL_MODE_P (mode)
2640 && trueop1 == CONSTM1_RTX (mode)
2641 && !side_effects_p (op0))
2642 return op1;
2643 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2644 return op0;
2645 /* A | (~A) -> -1 */
2646 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2647 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2648 && ! side_effects_p (op0)
2649 && SCALAR_INT_MODE_P (mode))
2650 return constm1_rtx;
2652 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2653 if (CONST_INT_P (op1)
2654 && HWI_COMPUTABLE_MODE_P (mode)
2655 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2656 && !side_effects_p (op0))
2657 return op1;
2659 /* Canonicalize (X & C1) | C2. */
2660 if (GET_CODE (op0) == AND
2661 && CONST_INT_P (trueop1)
2662 && CONST_INT_P (XEXP (op0, 1)))
2664 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2665 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2666 HOST_WIDE_INT c2 = INTVAL (trueop1);
2668 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2669 if ((c1 & c2) == c1
2670 && !side_effects_p (XEXP (op0, 0)))
2671 return trueop1;
2673 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2674 if (((c1|c2) & mask) == mask)
2675 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2678 /* Convert (A & B) | A to A. */
2679 if (GET_CODE (op0) == AND
2680 && (rtx_equal_p (XEXP (op0, 0), op1)
2681 || rtx_equal_p (XEXP (op0, 1), op1))
2682 && ! side_effects_p (XEXP (op0, 0))
2683 && ! side_effects_p (XEXP (op0, 1)))
2684 return op1;
2686 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2687 mode size to (rotate A CX). */
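/* E.g. in SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   becomes (rotate x (const_int 24)). */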
2689 if (GET_CODE (op1) == ASHIFT
2690 || GET_CODE (op1) == SUBREG)
2692 opleft = op1;
2693 opright = op0;
2695 else
2697 opright = op1;
2698 opleft = op0;
2701 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2702 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2703 && CONST_INT_P (XEXP (opleft, 1))
2704 && CONST_INT_P (XEXP (opright, 1))
2705 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2706 == GET_MODE_PRECISION (mode)))
2707 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2709 /* Same, but for ashift that has been "simplified" to a wider mode
2710 by simplify_shift_const. */
2712 if (GET_CODE (opleft) == SUBREG
2713 && is_a <scalar_int_mode> (mode, &int_mode)
2714 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2715 &inner_mode)
2716 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2717 && GET_CODE (opright) == LSHIFTRT
2718 && GET_CODE (XEXP (opright, 0)) == SUBREG
2719 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2720 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2721 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2722 SUBREG_REG (XEXP (opright, 0)))
2723 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2724 && CONST_INT_P (XEXP (opright, 1))
2725 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2726 + INTVAL (XEXP (opright, 1))
2727 == GET_MODE_PRECISION (int_mode)))
2728 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2729 XEXP (SUBREG_REG (opleft), 1));
2731 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2732 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2733 the PLUS does not affect any of the bits in OP1: then we can do
2734 the IOR as a PLUS and we can associate. This is valid if OP1
2735 can be safely shifted left C bits. */
2736 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2737 && GET_CODE (XEXP (op0, 0)) == PLUS
2738 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2739 && CONST_INT_P (XEXP (op0, 1))
2740 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2742 int count = INTVAL (XEXP (op0, 1));
2743 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2745 if (mask >> count == INTVAL (trueop1)
2746 && trunc_int_for_mode (mask, mode) == mask
2747 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2748 return simplify_gen_binary (ASHIFTRT, mode,
2749 plus_constant (mode, XEXP (op0, 0),
2750 mask),
2751 XEXP (op0, 1));
2754 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2755 if (tem)
2756 return tem;
2758 tem = simplify_associative_operation (code, mode, op0, op1);
2759 if (tem)
2760 return tem;
2761 break;
2763 case XOR:
2764 if (trueop1 == CONST0_RTX (mode))
2765 return op0;
2766 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2767 return simplify_gen_unary (NOT, mode, op0, mode);
2768 if (rtx_equal_p (trueop0, trueop1)
2769 && ! side_effects_p (op0)
2770 && GET_MODE_CLASS (mode) != MODE_CC)
2771 return CONST0_RTX (mode);
2773 /* Canonicalize XOR of the most significant bit to PLUS. */
2774 if (CONST_SCALAR_INT_P (op1)
2775 && mode_signbit_p (mode, op1))
2776 return simplify_gen_binary (PLUS, mode, op0, op1);
2777 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2778 if (CONST_SCALAR_INT_P (op1)
2779 && GET_CODE (op0) == PLUS
2780 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2781 && mode_signbit_p (mode, XEXP (op0, 1)))
2782 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2783 simplify_gen_binary (XOR, mode, op1,
2784 XEXP (op0, 1)));
2786 /* If we are XORing two things that have no bits in common,
2787 convert them into an IOR. This helps to detect rotation encoded
2788 using those methods and possibly other simplifications. */
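/* E.g. (xor (and x (const_int 3)) (const_int 4)) becomes
   (ior (and x (const_int 3)) (const_int 4)), since the operands have
   no nonzero bits in common. */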
2790 if (HWI_COMPUTABLE_MODE_P (mode)
2791 && (nonzero_bits (op0, mode)
2792 & nonzero_bits (op1, mode)) == 0)
2793 return (simplify_gen_binary (IOR, mode, op0, op1));
2795 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2796 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2797 (NOT y). */
2799 int num_negated = 0;
2801 if (GET_CODE (op0) == NOT)
2802 num_negated++, op0 = XEXP (op0, 0);
2803 if (GET_CODE (op1) == NOT)
2804 num_negated++, op1 = XEXP (op1, 0);
2806 if (num_negated == 2)
2807 return simplify_gen_binary (XOR, mode, op0, op1);
2808 else if (num_negated == 1)
2809 return simplify_gen_unary (NOT, mode,
2810 simplify_gen_binary (XOR, mode, op0, op1),
2811 mode);
2814 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2815 correspond to a machine insn or result in further simplifications
2816 if B is a constant. */
2818 if (GET_CODE (op0) == AND
2819 && rtx_equal_p (XEXP (op0, 1), op1)
2820 && ! side_effects_p (op1))
2821 return simplify_gen_binary (AND, mode,
2822 simplify_gen_unary (NOT, mode,
2823 XEXP (op0, 0), mode),
2824 op1);
2826 else if (GET_CODE (op0) == AND
2827 && rtx_equal_p (XEXP (op0, 0), op1)
2828 && ! side_effects_p (op1))
2829 return simplify_gen_binary (AND, mode,
2830 simplify_gen_unary (NOT, mode,
2831 XEXP (op0, 1), mode),
2832 op1);
2834 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2835 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2836 out bits inverted twice and not set by C. Similarly, given
2837 (xor (and (xor A B) C) D), simplify without inverting C in
2838 the xor operand: (xor (and A C) (B&C)^D). */
2840 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2841 && GET_CODE (XEXP (op0, 0)) == XOR
2842 && CONST_INT_P (op1)
2843 && CONST_INT_P (XEXP (op0, 1))
2844 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2846 enum rtx_code op = GET_CODE (op0);
2847 rtx a = XEXP (XEXP (op0, 0), 0);
2848 rtx b = XEXP (XEXP (op0, 0), 1);
2849 rtx c = XEXP (op0, 1);
2850 rtx d = op1;
2851 HOST_WIDE_INT bval = INTVAL (b);
2852 HOST_WIDE_INT cval = INTVAL (c);
2853 HOST_WIDE_INT dval = INTVAL (d);
2854 HOST_WIDE_INT xcval;
2856 if (op == IOR)
2857 xcval = ~cval;
2858 else
2859 xcval = cval;
2861 return simplify_gen_binary (XOR, mode,
2862 simplify_gen_binary (op, mode, a, c),
2863 gen_int_mode ((bval & xcval) ^ dval,
2864 mode));
2867 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2868 we can transform like this:
2869 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2870 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2871 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2872 Attempt a few simplifications when B and C are both constants. */
2873 if (GET_CODE (op0) == AND
2874 && CONST_INT_P (op1)
2875 && CONST_INT_P (XEXP (op0, 1)))
2877 rtx a = XEXP (op0, 0);
2878 rtx b = XEXP (op0, 1);
2879 rtx c = op1;
2880 HOST_WIDE_INT bval = INTVAL (b);
2881 HOST_WIDE_INT cval = INTVAL (c);
2883 /* Instead of computing ~A&C, we compute its negated value,
2884 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2885 optimize for sure. If it does not simplify, we still try
2886 to compute ~A&C below, but since that always allocates
2887 RTL, we don't try that before committing to returning a
2888 simplified expression. */
2889 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2890 GEN_INT (~cval));
2892 if ((~cval & bval) == 0)
2894 rtx na_c = NULL_RTX;
2895 if (n_na_c)
2896 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2897 else
2899 /* If ~A does not simplify, don't bother: we don't
2900 want to simplify 2 operations into 3, and if na_c
2901 were to simplify with na, n_na_c would have
2902 simplified as well. */
2903 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2904 if (na)
2905 na_c = simplify_gen_binary (AND, mode, na, c);
2908 /* Try to simplify ~A&C | ~B&C. */
2909 if (na_c != NULL_RTX)
2910 return simplify_gen_binary (IOR, mode, na_c,
2911 gen_int_mode (~bval & cval, mode));
2913 else
2915 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2916 if (n_na_c == CONSTM1_RTX (mode))
2918 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2919 gen_int_mode (~cval & bval,
2920 mode));
2921 return simplify_gen_binary (IOR, mode, a_nc_b,
2922 gen_int_mode (~bval & cval,
2923 mode));
2928 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2929 do (ior (and A ~C) (and B C)), which maps to a machine instruction on
2930 some targets and also has a shorter instruction path length. */
2931 if (GET_CODE (op0) == AND
2932 && GET_CODE (XEXP (op0, 0)) == XOR
2933 && CONST_INT_P (XEXP (op0, 1))
2934 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2936 rtx a = trueop1;
2937 rtx b = XEXP (XEXP (op0, 0), 1);
2938 rtx c = XEXP (op0, 1);
2939 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2940 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2941 rtx bc = simplify_gen_binary (AND, mode, b, c);
2942 return simplify_gen_binary (IOR, mode, a_nc, bc);
2944 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2945 else if (GET_CODE (op0) == AND
2946 && GET_CODE (XEXP (op0, 0)) == XOR
2947 && CONST_INT_P (XEXP (op0, 1))
2948 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2950 rtx a = XEXP (XEXP (op0, 0), 0);
2951 rtx b = trueop1;
2952 rtx c = XEXP (op0, 1);
2953 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2954 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2955 rtx ac = simplify_gen_binary (AND, mode, a, c);
2956 return simplify_gen_binary (IOR, mode, ac, b_nc);
2959 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2960 comparison if STORE_FLAG_VALUE is 1. */
2961 if (STORE_FLAG_VALUE == 1
2962 && trueop1 == const1_rtx
2963 && COMPARISON_P (op0)
2964 && (reversed = reversed_comparison (op0, mode)))
2965 return reversed;
2967 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2968 is (lt foo (const_int 0)), so we can perform the above
2969 simplification if STORE_FLAG_VALUE is 1. */
2971 if (is_a <scalar_int_mode> (mode, &int_mode)
2972 && STORE_FLAG_VALUE == 1
2973 && trueop1 == const1_rtx
2974 && GET_CODE (op0) == LSHIFTRT
2975 && CONST_INT_P (XEXP (op0, 1))
2976 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
2977 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
2979 /* (xor (comparison foo bar) (const_int sign-bit))
2980 when STORE_FLAG_VALUE is the sign bit. */
2981 if (is_a <scalar_int_mode> (mode, &int_mode)
2982 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
2983 && trueop1 == const_true_rtx
2984 && COMPARISON_P (op0)
2985 && (reversed = reversed_comparison (op0, int_mode)))
2986 return reversed;
2988 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2989 if (tem)
2990 return tem;
2992 tem = simplify_associative_operation (code, mode, op0, op1);
2993 if (tem)
2994 return tem;
2995 break;
2997 case AND:
2998 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2999 return trueop1;
3000 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3001 return op0;
3002 if (HWI_COMPUTABLE_MODE_P (mode))
3004 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3005 HOST_WIDE_INT nzop1;
3006 if (CONST_INT_P (trueop1))
3008 HOST_WIDE_INT val1 = INTVAL (trueop1);
3009 /* If we are turning off bits already known off in OP0, we need
3010 not do an AND. */
3011 if ((nzop0 & ~val1) == 0)
3012 return op0;
3014 nzop1 = nonzero_bits (trueop1, mode);
3015 /* If we are clearing all the nonzero bits, the result is zero. */
3016 if ((nzop1 & nzop0) == 0
3017 && !side_effects_p (op0) && !side_effects_p (op1))
3018 return CONST0_RTX (mode);
3020 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3021 && GET_MODE_CLASS (mode) != MODE_CC)
3022 return op0;
3023 /* A & (~A) -> 0 */
3024 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3025 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3026 && ! side_effects_p (op0)
3027 && GET_MODE_CLASS (mode) != MODE_CC)
3028 return CONST0_RTX (mode);
3030 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3031 there are no nonzero bits of C outside of X's mode. */
3032 if ((GET_CODE (op0) == SIGN_EXTEND
3033 || GET_CODE (op0) == ZERO_EXTEND)
3034 && CONST_INT_P (trueop1)
3035 && HWI_COMPUTABLE_MODE_P (mode)
3036 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3037 & UINTVAL (trueop1)) == 0)
3039 machine_mode imode = GET_MODE (XEXP (op0, 0));
3040 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3041 gen_int_mode (INTVAL (trueop1),
3042 imode));
3043 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3046 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3047 we might be able to further simplify the AND with X and potentially
3048 remove the truncation altogether. */
3049 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3051 rtx x = XEXP (op0, 0);
3052 machine_mode xmode = GET_MODE (x);
3053 tem = simplify_gen_binary (AND, xmode, x,
3054 gen_int_mode (INTVAL (trueop1), xmode));
3055 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3058 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3059 if (GET_CODE (op0) == IOR
3060 && CONST_INT_P (trueop1)
3061 && CONST_INT_P (XEXP (op0, 1)))
3063 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3064 return simplify_gen_binary (IOR, mode,
3065 simplify_gen_binary (AND, mode,
3066 XEXP (op0, 0), op1),
3067 gen_int_mode (tmp, mode));
3070 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3071 insn (and may simplify more). */
3072 if (GET_CODE (op0) == XOR
3073 && rtx_equal_p (XEXP (op0, 0), op1)
3074 && ! side_effects_p (op1))
3075 return simplify_gen_binary (AND, mode,
3076 simplify_gen_unary (NOT, mode,
3077 XEXP (op0, 1), mode),
3078 op1);
3080 if (GET_CODE (op0) == XOR
3081 && rtx_equal_p (XEXP (op0, 1), op1)
3082 && ! side_effects_p (op1))
3083 return simplify_gen_binary (AND, mode,
3084 simplify_gen_unary (NOT, mode,
3085 XEXP (op0, 0), mode),
3086 op1);
3088 /* Similarly for (~(A ^ B)) & A. */
3089 if (GET_CODE (op0) == NOT
3090 && GET_CODE (XEXP (op0, 0)) == XOR
3091 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3092 && ! side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3095 if (GET_CODE (op0) == NOT
3096 && GET_CODE (XEXP (op0, 0)) == XOR
3097 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3098 && ! side_effects_p (op1))
3099 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3101 /* Convert (A | B) & A to A. */
3102 if (GET_CODE (op0) == IOR
3103 && (rtx_equal_p (XEXP (op0, 0), op1)
3104 || rtx_equal_p (XEXP (op0, 1), op1))
3105 && ! side_effects_p (XEXP (op0, 0))
3106 && ! side_effects_p (XEXP (op0, 1)))
3107 return op1;
3109 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3110 ((A & N) + B) & M -> (A + B) & M
3111 Similarly if (N & M) == 0,
3112 ((A | N) + B) & M -> (A + B) & M
3113 and for - instead of + and/or ^ instead of |.
3114 Also, if (N & M) == 0, then
3115 (A +- N) & M -> A & M. */
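/* E.g. with M == 7, (and (plus (ior a (const_int 8)) b) (const_int 7))
   becomes (and (plus a b) (const_int 7)), because 8 has no bits in
   common with the mask. */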
3116 if (CONST_INT_P (trueop1)
3117 && HWI_COMPUTABLE_MODE_P (mode)
3118 && ~UINTVAL (trueop1)
3119 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3120 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3122 rtx pmop[2];
3123 int which;
3125 pmop[0] = XEXP (op0, 0);
3126 pmop[1] = XEXP (op0, 1);
3128 if (CONST_INT_P (pmop[1])
3129 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3130 return simplify_gen_binary (AND, mode, pmop[0], op1);
3132 for (which = 0; which < 2; which++)
3134 tem = pmop[which];
3135 switch (GET_CODE (tem))
3137 case AND:
3138 if (CONST_INT_P (XEXP (tem, 1))
3139 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3140 == UINTVAL (trueop1))
3141 pmop[which] = XEXP (tem, 0);
3142 break;
3143 case IOR:
3144 case XOR:
3145 if (CONST_INT_P (XEXP (tem, 1))
3146 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3147 pmop[which] = XEXP (tem, 0);
3148 break;
3149 default:
3150 break;
3154 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3156 tem = simplify_gen_binary (GET_CODE (op0), mode,
3157 pmop[0], pmop[1]);
3158 return simplify_gen_binary (code, mode, tem, op1);
3162 /* (and X (ior (not X) Y)) -> (and X Y) */
3163 if (GET_CODE (op1) == IOR
3164 && GET_CODE (XEXP (op1, 0)) == NOT
3165 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3166 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3168 /* (and (ior (not X) Y) X) -> (and X Y) */
3169 if (GET_CODE (op0) == IOR
3170 && GET_CODE (XEXP (op0, 0)) == NOT
3171 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3172 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3174 /* (and X (ior Y (not X))) -> (and X Y) */
3175 if (GET_CODE (op1) == IOR
3176 && GET_CODE (XEXP (op1, 1)) == NOT
3177 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3178 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3180 /* (and (ior Y (not X)) X) -> (and X Y) */
3181 if (GET_CODE (op0) == IOR
3182 && GET_CODE (XEXP (op0, 1)) == NOT
3183 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3184 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3186 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3187 if (tem)
3188 return tem;
3190 tem = simplify_associative_operation (code, mode, op0, op1);
3191 if (tem)
3192 return tem;
3193 break;
3195 case UDIV:
3196 /* 0/x is 0 (or x&0 if x has side-effects). */
3197 if (trueop0 == CONST0_RTX (mode)
3198 && !cfun->can_throw_non_call_exceptions)
3200 if (side_effects_p (op1))
3201 return simplify_gen_binary (AND, mode, op1, trueop0);
3202 return trueop0;
3204 /* x/1 is x. */
3205 if (trueop1 == CONST1_RTX (mode))
3207 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3208 if (tem)
3209 return tem;
3211 /* Convert divide by power of two into shift. */
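/* E.g. (udiv x (const_int 4)) becomes (lshiftrt x (const_int 2)). */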
3212 if (CONST_INT_P (trueop1)
3213 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3214 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3215 break;
3217 case DIV:
3218 /* Handle floating point and integers separately. */
3219 if (SCALAR_FLOAT_MODE_P (mode))
3221 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3222 safe for modes with NaNs, since 0.0 / 0.0 will then be
3223 NaN rather than 0.0. Nor is it safe for modes with signed
3224 zeros, since dividing 0 by a negative number gives -0.0 */
3225 if (trueop0 == CONST0_RTX (mode)
3226 && !HONOR_NANS (mode)
3227 && !HONOR_SIGNED_ZEROS (mode)
3228 && ! side_effects_p (op1))
3229 return op0;
3230 /* x/1.0 is x. */
3231 if (trueop1 == CONST1_RTX (mode)
3232 && !HONOR_SNANS (mode))
3233 return op0;
3235 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3236 && trueop1 != CONST0_RTX (mode))
3238 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3240 /* x/-1.0 is -x. */
3241 if (real_equal (d1, &dconstm1)
3242 && !HONOR_SNANS (mode))
3243 return simplify_gen_unary (NEG, mode, op0, mode);
3245 /* Change FP division by a constant into multiplication.
3246 Only do this with -freciprocal-math. */
3247 if (flag_reciprocal_math
3248 && !real_equal (d1, &dconst0))
3250 REAL_VALUE_TYPE d;
3251 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3252 tem = const_double_from_real_value (d, mode);
3253 return simplify_gen_binary (MULT, mode, op0, tem);
3257 else if (SCALAR_INT_MODE_P (mode))
3259 /* 0/x is 0 (or x&0 if x has side-effects). */
3260 if (trueop0 == CONST0_RTX (mode)
3261 && !cfun->can_throw_non_call_exceptions)
3263 if (side_effects_p (op1))
3264 return simplify_gen_binary (AND, mode, op1, trueop0);
3265 return trueop0;
3267 /* x/1 is x. */
3268 if (trueop1 == CONST1_RTX (mode))
3270 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3271 if (tem)
3272 return tem;
3274 /* x/-1 is -x. */
3275 if (trueop1 == constm1_rtx)
3277 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3278 if (x)
3279 return simplify_gen_unary (NEG, mode, x, mode);
3282 break;
3284 case UMOD:
3285 /* 0%x is 0 (or x&0 if x has side-effects). */
3286 if (trueop0 == CONST0_RTX (mode))
3288 if (side_effects_p (op1))
3289 return simplify_gen_binary (AND, mode, op1, trueop0);
3290 return trueop0;
3292 /* x%1 is 0 (or x&0 if x has side-effects). */
3293 if (trueop1 == CONST1_RTX (mode))
3295 if (side_effects_p (op0))
3296 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3297 return CONST0_RTX (mode);
3299 /* Implement modulus by power of two as AND. */
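/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)). */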
3300 if (CONST_INT_P (trueop1)
3301 && exact_log2 (UINTVAL (trueop1)) > 0)
3302 return simplify_gen_binary (AND, mode, op0,
3303 gen_int_mode (INTVAL (op1) - 1, mode));
3304 break;
3306 case MOD:
3307 /* 0%x is 0 (or x&0 if x has side-effects). */
3308 if (trueop0 == CONST0_RTX (mode))
3310 if (side_effects_p (op1))
3311 return simplify_gen_binary (AND, mode, op1, trueop0);
3312 return trueop0;
3314 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3315 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3317 if (side_effects_p (op0))
3318 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3319 return CONST0_RTX (mode);
3321 break;
3323 case ROTATERT:
3324 case ROTATE:
3325 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3326 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3327 bitsize - 1, use the other rotate direction with an amount of
3328 1 .. bitsize / 2 - 1 instead. */
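/* E.g. in SImode, when both rotate and rotatert are available,
   (rotate x (const_int 24)) becomes (rotatert x (const_int 8)). */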
3329 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3330 if (CONST_INT_P (trueop1)
3331 && IN_RANGE (INTVAL (trueop1),
3332 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3333 GET_MODE_PRECISION (mode) - 1))
3334 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3335 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3336 - INTVAL (trueop1)));
3337 #endif
3338 /* FALLTHRU */
3339 case ASHIFTRT:
3340 if (trueop1 == CONST0_RTX (mode))
3341 return op0;
3342 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3343 return op0;
3344 /* Rotating ~0 always results in ~0. */
3345 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3346 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3347 && ! side_effects_p (op1))
3348 return op0;
3350 canonicalize_shift:
3351 /* Given:
3352 scalar modes M1, M2
3353 scalar constants c1, c2
3354 size (M2) > size (M1)
3355 c1 == size (M2) - size (M1)
3356 optimize:
3357 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3358 <low_part>)
3359 (const_int <c2>))
3361 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3362 <low_part>). */
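/* E.g. with M2 == DImode, M1 == SImode and c1 == 32:
   (lshiftrt:SI (subreg:SI (lshiftrt:DI r (const_int 32)) <low_part>)
                (const_int 5))
   becomes (subreg:SI (lshiftrt:DI r (const_int 37)) <low_part>). */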
3363 if ((code == ASHIFTRT || code == LSHIFTRT)
3364 && is_a <scalar_int_mode> (mode, &int_mode)
3365 && SUBREG_P (op0)
3366 && CONST_INT_P (op1)
3367 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3368 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3369 &inner_mode)
3370 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3371 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3372 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3373 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3374 && subreg_lowpart_p (op0))
3376 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3377 + INTVAL (op1));
3378 tmp = simplify_gen_binary (code, inner_mode,
3379 XEXP (SUBREG_REG (op0), 0),
3380 tmp);
3381 return lowpart_subreg (int_mode, tmp, inner_mode);
3384 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3386 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3387 if (val != INTVAL (op1))
3388 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3390 break;
3392 case ASHIFT:
3393 case SS_ASHIFT:
3394 case US_ASHIFT:
3395 if (trueop1 == CONST0_RTX (mode))
3396 return op0;
3397 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3398 return op0;
3399 goto canonicalize_shift;
3401 case LSHIFTRT:
3402 if (trueop1 == CONST0_RTX (mode))
3403 return op0;
3404 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3405 return op0;
3406 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
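/* E.g. on a target where STORE_FLAG_VALUE is 1 and CLZ of zero is
   defined as the SImode precision (32),
   (lshiftrt (clz:SI x) (const_int 5)) becomes (eq x (const_int 0)). */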
3407 if (GET_CODE (op0) == CLZ
3408 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3409 && CONST_INT_P (trueop1)
3410 && STORE_FLAG_VALUE == 1
3411 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3413 unsigned HOST_WIDE_INT zero_val = 0;
3415 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3416 && zero_val == GET_MODE_PRECISION (inner_mode)
3417 && INTVAL (trueop1) == exact_log2 (zero_val))
3418 return simplify_gen_relational (EQ, mode, inner_mode,
3419 XEXP (op0, 0), const0_rtx);
3421 goto canonicalize_shift;
3423 case SMIN:
3424 if (width <= HOST_BITS_PER_WIDE_INT
3425 && mode_signbit_p (mode, trueop1)
3426 && ! side_effects_p (op0))
3427 return op1;
3428 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3429 return op0;
3430 tem = simplify_associative_operation (code, mode, op0, op1);
3431 if (tem)
3432 return tem;
3433 break;
3435 case SMAX:
3436 if (width <= HOST_BITS_PER_WIDE_INT
3437 && CONST_INT_P (trueop1)
3438 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3439 && ! side_effects_p (op0))
3440 return op1;
3441 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3442 return op0;
3443 tem = simplify_associative_operation (code, mode, op0, op1);
3444 if (tem)
3445 return tem;
3446 break;
3448 case UMIN:
3449 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3450 return op1;
3451 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3452 return op0;
3453 tem = simplify_associative_operation (code, mode, op0, op1);
3454 if (tem)
3455 return tem;
3456 break;
3458 case UMAX:
3459 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3460 return op1;
3461 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3462 return op0;
3463 tem = simplify_associative_operation (code, mode, op0, op1);
3464 if (tem)
3465 return tem;
3466 break;
3468 case SS_PLUS:
3469 case US_PLUS:
3470 case SS_MINUS:
3471 case US_MINUS:
3472 case SS_MULT:
3473 case US_MULT:
3474 case SS_DIV:
3475 case US_DIV:
3476 /* ??? There are simplifications that can be done. */
3477 return 0;
3479 case VEC_SELECT:
3480 if (!VECTOR_MODE_P (mode))
3482 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3483 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3484 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3485 gcc_assert (XVECLEN (trueop1, 0) == 1);
3486 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3488 if (GET_CODE (trueop0) == CONST_VECTOR)
3489 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3490 (trueop1, 0, 0)));
3492 /* Extract a scalar element from a nested VEC_SELECT expression
3493 (with optional nested VEC_CONCAT expression). Some targets
3494 (i386) extract a scalar element from a vector using a chain of
3495 nested VEC_SELECT expressions. When the input operand is a memory
3496 operand, this operation can be simplified to a simple scalar
3497 load from an offsetted memory address. */
3498 if (GET_CODE (trueop0) == VEC_SELECT)
3500 rtx op0 = XEXP (trueop0, 0);
3501 rtx op1 = XEXP (trueop0, 1);
3503 machine_mode opmode = GET_MODE (op0);
3504 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3505 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3507 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3508 int elem;
3510 rtvec vec;
3511 rtx tmp_op, tmp;
3513 gcc_assert (GET_CODE (op1) == PARALLEL);
3514 gcc_assert (i < n_elts);
3516 /* Select the element pointed to by the nested selector. */
3517 elem = INTVAL (XVECEXP (op1, 0, i));
3519 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3520 if (GET_CODE (op0) == VEC_CONCAT)
3522 rtx op00 = XEXP (op0, 0);
3523 rtx op01 = XEXP (op0, 1);
3525 machine_mode mode00, mode01;
3526 int n_elts00, n_elts01;
3528 mode00 = GET_MODE (op00);
3529 mode01 = GET_MODE (op01);
3531 /* Find out number of elements of each operand. */
3532 if (VECTOR_MODE_P (mode00))
3534 elt_size = GET_MODE_UNIT_SIZE (mode00);
3535 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3537 else
3538 n_elts00 = 1;
3540 if (VECTOR_MODE_P (mode01))
3542 elt_size = GET_MODE_UNIT_SIZE (mode01);
3543 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3545 else
3546 n_elts01 = 1;
3548 gcc_assert (n_elts == n_elts00 + n_elts01);
3550 /* Select the correct operand of the VEC_CONCAT
3551 and adjust the selector. */
3552 if (elem < n_elts01)
3553 tmp_op = op00;
3554 else
3556 tmp_op = op01;
3557 elem -= n_elts00;
3560 else
3561 tmp_op = op0;
3563 vec = rtvec_alloc (1);
3564 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3566 tmp = gen_rtx_fmt_ee (code, mode,
3567 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3568 return tmp;
3570 if (GET_CODE (trueop0) == VEC_DUPLICATE
3571 && GET_MODE (XEXP (trueop0, 0)) == mode)
3572 return XEXP (trueop0, 0);
3574 else
3576 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3577 gcc_assert (GET_MODE_INNER (mode)
3578 == GET_MODE_INNER (GET_MODE (trueop0)));
3579 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3581 if (GET_CODE (trueop0) == CONST_VECTOR)
3583 int elt_size = GET_MODE_UNIT_SIZE (mode);
3584 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3585 rtvec v = rtvec_alloc (n_elts);
3586 unsigned int i;
3588 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3589 for (i = 0; i < n_elts; i++)
3591 rtx x = XVECEXP (trueop1, 0, i);
3593 gcc_assert (CONST_INT_P (x));
3594 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3595 INTVAL (x));
3598 return gen_rtx_CONST_VECTOR (mode, v);
3601 /* Recognize the identity. */
3602 if (GET_MODE (trueop0) == mode)
3604 bool maybe_ident = true;
3605 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3607 rtx j = XVECEXP (trueop1, 0, i);
3608 if (!CONST_INT_P (j) || INTVAL (j) != i)
3610 maybe_ident = false;
3611 break;
3614 if (maybe_ident)
3615 return trueop0;
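/* Example of the identity case (illustrative, not from the original source):
   (vec_select:V4SI (reg:V4SI x) (parallel [0 1 2 3])) selects every element
   in order and in the same mode, so it simplifies to (reg:V4SI x).  */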
3618 /* If we build {a,b} then permute it, build the result directly. */
3619 if (XVECLEN (trueop1, 0) == 2
3620 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3621 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3622 && GET_CODE (trueop0) == VEC_CONCAT
3623 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3624 && GET_MODE (XEXP (trueop0, 0)) == mode
3625 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3626 && GET_MODE (XEXP (trueop0, 1)) == mode)
3628 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3629 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3630 rtx subop0, subop1;
3632 gcc_assert (i0 < 4 && i1 < 4);
3633 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3634 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3636 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
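/* Illustrative example (hypothetical operands): with
   trueop0 = (vec_concat:V4SI (vec_concat:V2SI a b) (vec_concat:V2SI c d))
   and trueop1 = (parallel [2 1]), i0 = 2 picks c and i1 = 1 picks b, so the
   result is built directly as (vec_concat:V2SI c b).  */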
3639 if (XVECLEN (trueop1, 0) == 2
3640 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3641 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3642 && GET_CODE (trueop0) == VEC_CONCAT
3643 && GET_MODE (trueop0) == mode)
3645 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3646 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3647 rtx subop0, subop1;
3649 gcc_assert (i0 < 2 && i1 < 2);
3650 subop0 = XEXP (trueop0, i0);
3651 subop1 = XEXP (trueop0, i1);
3653 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3656 /* If we select one half of a vec_concat, return that. */
3657 if (GET_CODE (trueop0) == VEC_CONCAT
3658 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3660 rtx subop0 = XEXP (trueop0, 0);
3661 rtx subop1 = XEXP (trueop0, 1);
3662 machine_mode mode0 = GET_MODE (subop0);
3663 machine_mode mode1 = GET_MODE (subop1);
3664 int li = GET_MODE_UNIT_SIZE (mode0);
3665 int l0 = GET_MODE_SIZE (mode0) / li;
3666 int l1 = GET_MODE_SIZE (mode1) / li;
3667 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3668 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3670 bool success = true;
3671 for (int i = 1; i < l0; ++i)
3673 rtx j = XVECEXP (trueop1, 0, i);
3674 if (!CONST_INT_P (j) || INTVAL (j) != i)
3676 success = false;
3677 break;
3680 if (success)
3681 return subop0;
3683 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3685 bool success = true;
3686 for (int i = 1; i < l1; ++i)
3688 rtx j = XVECEXP (trueop1, 0, i);
3689 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3691 success = false;
3692 break;
3695 if (success)
3696 return subop1;
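/* Illustrative example (hypothetical operands): selecting the upper half of
   a concatenation,
   (vec_select:V2SI (vec_concat:V4SI (reg:V2SI x) (reg:V2SI y)) (parallel [2 3])),
   reads exactly the elements of y, so it simplifies to (reg:V2SI y).  */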
3701 if (XVECLEN (trueop1, 0) == 1
3702 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3703 && GET_CODE (trueop0) == VEC_CONCAT)
3705 rtx vec = trueop0;
3706 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3708 /* Try to find the element in the VEC_CONCAT. */
3709 while (GET_MODE (vec) != mode
3710 && GET_CODE (vec) == VEC_CONCAT)
3712 HOST_WIDE_INT vec_size;
3714 if (CONST_INT_P (XEXP (vec, 0)))
3716 /* vec_concat of two const_ints doesn't make sense with
3717 respect to modes. */
3718 if (CONST_INT_P (XEXP (vec, 1)))
3719 return 0;
3721 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3722 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3724 else
3725 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3727 if (offset < vec_size)
3728 vec = XEXP (vec, 0);
3729 else
3731 offset -= vec_size;
3732 vec = XEXP (vec, 1);
3734 vec = avoid_constant_pool_reference (vec);
3737 if (GET_MODE (vec) == mode)
3738 return vec;
3741 /* If we select elements in a vec_merge that all come from the same
3742 operand, select from that operand directly. */
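/* For instance (illustrative only): in
   (vec_select:V2SI (vec_merge:V4SI a b (const_int 5)) (parallel [0 2]))
   the merge-mask bits 0 and 2 are set, so both selected elements come from
   a and the expression becomes (vec_select:V2SI a (parallel [0 2])).  */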
3743 if (GET_CODE (op0) == VEC_MERGE)
3745 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3746 if (CONST_INT_P (trueop02))
3748 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3749 bool all_operand0 = true;
3750 bool all_operand1 = true;
3751 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3753 rtx j = XVECEXP (trueop1, 0, i);
3754 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3755 all_operand1 = false;
3756 else
3757 all_operand0 = false;
3759 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3760 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3761 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3762 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3766 /* If we have two nested selects that are inverses of each
3767 other, replace them with the source operand. */
3768 if (GET_CODE (trueop0) == VEC_SELECT
3769 && GET_MODE (XEXP (trueop0, 0)) == mode)
3771 rtx op0_subop1 = XEXP (trueop0, 1);
3772 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3773 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3775 /* Apply the outer ordering vector to the inner one. (The inner
3776 ordering vector is expressly permitted to be of a different
3777 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3778 then the two VEC_SELECTs cancel. */
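/* Illustrative example (hypothetical operand x): the permutation
   (parallel [2 3 0 1]) applied twice composes to the identity, so
   (vec_select:V4SI (vec_select:V4SI x (parallel [2 3 0 1])) (parallel [2 3 0 1]))
   collapses to x.  */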
3779 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3781 rtx x = XVECEXP (trueop1, 0, i);
3782 if (!CONST_INT_P (x))
3783 return 0;
3784 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3785 if (!CONST_INT_P (y) || i != INTVAL (y))
3786 return 0;
3788 return XEXP (trueop0, 0);
3791 return 0;
3792 case VEC_CONCAT:
3794 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3795 ? GET_MODE (trueop0)
3796 : GET_MODE_INNER (mode));
3797 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3798 ? GET_MODE (trueop1)
3799 : GET_MODE_INNER (mode));
3801 gcc_assert (VECTOR_MODE_P (mode));
3802 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3803 == GET_MODE_SIZE (mode));
3805 if (VECTOR_MODE_P (op0_mode))
3806 gcc_assert (GET_MODE_INNER (mode)
3807 == GET_MODE_INNER (op0_mode));
3808 else
3809 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3811 if (VECTOR_MODE_P (op1_mode))
3812 gcc_assert (GET_MODE_INNER (mode)
3813 == GET_MODE_INNER (op1_mode));
3814 else
3815 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3817 if ((GET_CODE (trueop0) == CONST_VECTOR
3818 || CONST_SCALAR_INT_P (trueop0)
3819 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3820 && (GET_CODE (trueop1) == CONST_VECTOR
3821 || CONST_SCALAR_INT_P (trueop1)
3822 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3824 int elt_size = GET_MODE_UNIT_SIZE (mode);
3825 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3826 rtvec v = rtvec_alloc (n_elts);
3827 unsigned int i;
3828 unsigned in_n_elts = 1;
3830 if (VECTOR_MODE_P (op0_mode))
3831 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3832 for (i = 0; i < n_elts; i++)
3834 if (i < in_n_elts)
3836 if (!VECTOR_MODE_P (op0_mode))
3837 RTVEC_ELT (v, i) = trueop0;
3838 else
3839 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3841 else
3843 if (!VECTOR_MODE_P (op1_mode))
3844 RTVEC_ELT (v, i) = trueop1;
3845 else
3846 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3847 i - in_n_elts);
3851 return gen_rtx_CONST_VECTOR (mode, v);
3854 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3855 Restrict the transformation to avoid generating a VEC_SELECT with a
3856 mode unrelated to its operand. */
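/* Illustrative example (hypothetical operand x of mode V4SI): the pair
   (vec_concat:V4SI (vec_select:V2SI x (parallel [0 1]))
                    (vec_select:V2SI x (parallel [3 2])))
   becomes the single (vec_select:V4SI x (parallel [0 1 3 2])).  */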
3857 if (GET_CODE (trueop0) == VEC_SELECT
3858 && GET_CODE (trueop1) == VEC_SELECT
3859 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3860 && GET_MODE (XEXP (trueop0, 0)) == mode)
3862 rtx par0 = XEXP (trueop0, 1);
3863 rtx par1 = XEXP (trueop1, 1);
3864 int len0 = XVECLEN (par0, 0);
3865 int len1 = XVECLEN (par1, 0);
3866 rtvec vec = rtvec_alloc (len0 + len1);
3867 for (int i = 0; i < len0; i++)
3868 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3869 for (int i = 0; i < len1; i++)
3870 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3871 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3872 gen_rtx_PARALLEL (VOIDmode, vec));
3875 return 0;
3877 default:
3878 gcc_unreachable ();
3881 return 0;
3885 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3886 rtx op0, rtx op1)
3888 if (VECTOR_MODE_P (mode)
3889 && code != VEC_CONCAT
3890 && GET_CODE (op0) == CONST_VECTOR
3891 && GET_CODE (op1) == CONST_VECTOR)
3893 unsigned n_elts = GET_MODE_NUNITS (mode);
3894 machine_mode op0mode = GET_MODE (op0);
3895 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3896 machine_mode op1mode = GET_MODE (op1);
3897 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3898 rtvec v = rtvec_alloc (n_elts);
3899 unsigned int i;
3901 gcc_assert (op0_n_elts == n_elts);
3902 gcc_assert (op1_n_elts == n_elts);
3903 for (i = 0; i < n_elts; i++)
3905 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3906 CONST_VECTOR_ELT (op0, i),
3907 CONST_VECTOR_ELT (op1, i));
3908 if (!x)
3909 return 0;
3910 RTVEC_ELT (v, i) = x;
3913 return gen_rtx_CONST_VECTOR (mode, v);
3916 if (VECTOR_MODE_P (mode)
3917 && code == VEC_CONCAT
3918 && (CONST_SCALAR_INT_P (op0)
3919 || GET_CODE (op0) == CONST_FIXED
3920 || CONST_DOUBLE_AS_FLOAT_P (op0))
3921 && (CONST_SCALAR_INT_P (op1)
3922 || CONST_DOUBLE_AS_FLOAT_P (op1)
3923 || GET_CODE (op1) == CONST_FIXED))
3925 unsigned n_elts = GET_MODE_NUNITS (mode);
3926 rtvec v = rtvec_alloc (n_elts);
3928 gcc_assert (n_elts >= 2);
3929 if (n_elts == 2)
3931 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3932 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3934 RTVEC_ELT (v, 0) = op0;
3935 RTVEC_ELT (v, 1) = op1;
3937 else
3939 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3940 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3941 unsigned i;
3943 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3944 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3945 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3947 for (i = 0; i < op0_n_elts; ++i)
3948 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3949 for (i = 0; i < op1_n_elts; ++i)
3950 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3953 return gen_rtx_CONST_VECTOR (mode, v);
3956 if (SCALAR_FLOAT_MODE_P (mode)
3957 && CONST_DOUBLE_AS_FLOAT_P (op0)
3958 && CONST_DOUBLE_AS_FLOAT_P (op1)
3959 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3961 if (code == AND
3962 || code == IOR
3963 || code == XOR)
3965 long tmp0[4];
3966 long tmp1[4];
3967 REAL_VALUE_TYPE r;
3968 int i;
3970 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3971 GET_MODE (op0));
3972 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3973 GET_MODE (op1));
3974 for (i = 0; i < 4; i++)
3976 switch (code)
3978 case AND:
3979 tmp0[i] &= tmp1[i];
3980 break;
3981 case IOR:
3982 tmp0[i] |= tmp1[i];
3983 break;
3984 case XOR:
3985 tmp0[i] ^= tmp1[i];
3986 break;
3987 default:
3988 gcc_unreachable ();
3991 real_from_target (&r, tmp0, mode);
3992 return const_double_from_real_value (r, mode);
3994 else
3996 REAL_VALUE_TYPE f0, f1, value, result;
3997 const REAL_VALUE_TYPE *opr0, *opr1;
3998 bool inexact;
4000 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4001 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4003 if (HONOR_SNANS (mode)
4004 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4005 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4006 return 0;
4008 real_convert (&f0, mode, opr0);
4009 real_convert (&f1, mode, opr1);
4011 if (code == DIV
4012 && real_equal (&f1, &dconst0)
4013 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4014 return 0;
4016 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4017 && flag_trapping_math
4018 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4020 int s0 = REAL_VALUE_NEGATIVE (f0);
4021 int s1 = REAL_VALUE_NEGATIVE (f1);
4023 switch (code)
4025 case PLUS:
4026 /* Inf + -Inf = NaN plus exception. */
4027 if (s0 != s1)
4028 return 0;
4029 break;
4030 case MINUS:
4031 /* Inf - Inf = NaN plus exception. */
4032 if (s0 == s1)
4033 return 0;
4034 break;
4035 case DIV:
4036 /* Inf / Inf = NaN plus exception. */
4037 return 0;
4038 default:
4039 break;
4043 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4044 && flag_trapping_math
4045 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4046 || (REAL_VALUE_ISINF (f1)
4047 && real_equal (&f0, &dconst0))))
4048 /* Inf * 0 = NaN plus exception. */
4049 return 0;
4051 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4052 &f0, &f1);
4053 real_convert (&result, mode, &value);
4055 /* Don't constant fold this floating point operation if
4056 the result has overflowed and flag_trapping_math. */
4058 if (flag_trapping_math
4059 && MODE_HAS_INFINITIES (mode)
4060 && REAL_VALUE_ISINF (result)
4061 && !REAL_VALUE_ISINF (f0)
4062 && !REAL_VALUE_ISINF (f1))
4063 /* Overflow plus exception. */
4064 return 0;
4066 /* Don't constant fold this floating point operation if the
4067 result may depend upon the run-time rounding mode and
4068 flag_rounding_math is set, or if GCC's software emulation
4069 is unable to accurately represent the result. */
4071 if ((flag_rounding_math
4072 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4073 && (inexact || !real_identical (&result, &value)))
4074 return NULL_RTX;
4076 return const_double_from_real_value (result, mode);
4080 /* We can fold some multi-word operations. */
4081 scalar_int_mode int_mode;
4082 if (is_a <scalar_int_mode> (mode, &int_mode)
4083 && CONST_SCALAR_INT_P (op0)
4084 && CONST_SCALAR_INT_P (op1))
4086 wide_int result;
4087 bool overflow;
4088 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4089 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4091 #if TARGET_SUPPORTS_WIDE_INT == 0
4092 /* This assert keeps the simplification from producing a result
4093 that cannot be represented in a CONST_DOUBLE, but a lot of
4094 upstream callers expect that this function never fails to
4095 simplify something, so if you added this check to the test
4096 above, the code would die later anyway. If this assert
4097 triggers, you just need to make the port support wide int. */
4098 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4099 #endif
4100 switch (code)
4102 case MINUS:
4103 result = wi::sub (pop0, pop1);
4104 break;
4106 case PLUS:
4107 result = wi::add (pop0, pop1);
4108 break;
4110 case MULT:
4111 result = wi::mul (pop0, pop1);
4112 break;
4114 case DIV:
4115 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4116 if (overflow)
4117 return NULL_RTX;
4118 break;
4120 case MOD:
4121 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4122 if (overflow)
4123 return NULL_RTX;
4124 break;
4126 case UDIV:
4127 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4128 if (overflow)
4129 return NULL_RTX;
4130 break;
4132 case UMOD:
4133 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4134 if (overflow)
4135 return NULL_RTX;
4136 break;
4138 case AND:
4139 result = wi::bit_and (pop0, pop1);
4140 break;
4142 case IOR:
4143 result = wi::bit_or (pop0, pop1);
4144 break;
4146 case XOR:
4147 result = wi::bit_xor (pop0, pop1);
4148 break;
4150 case SMIN:
4151 result = wi::smin (pop0, pop1);
4152 break;
4154 case SMAX:
4155 result = wi::smax (pop0, pop1);
4156 break;
4158 case UMIN:
4159 result = wi::umin (pop0, pop1);
4160 break;
4162 case UMAX:
4163 result = wi::umax (pop0, pop1);
4164 break;
4166 case LSHIFTRT:
4167 case ASHIFTRT:
4168 case ASHIFT:
4170 wide_int wop1 = pop1;
4171 if (SHIFT_COUNT_TRUNCATED)
4172 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4173 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4174 return NULL_RTX;
4176 switch (code)
4178 case LSHIFTRT:
4179 result = wi::lrshift (pop0, wop1);
4180 break;
4182 case ASHIFTRT:
4183 result = wi::arshift (pop0, wop1);
4184 break;
4186 case ASHIFT:
4187 result = wi::lshift (pop0, wop1);
4188 break;
4190 default:
4191 gcc_unreachable ();
4193 break;
4195 case ROTATE:
4196 case ROTATERT:
4198 if (wi::neg_p (pop1))
4199 return NULL_RTX;
4201 switch (code)
4203 case ROTATE:
4204 result = wi::lrotate (pop0, pop1);
4205 break;
4207 case ROTATERT:
4208 result = wi::rrotate (pop0, pop1);
4209 break;
4211 default:
4212 gcc_unreachable ();
4214 break;
4216 default:
4217 return NULL_RTX;
4219 return immed_wide_int_const (result, int_mode);
4222 return NULL_RTX;
4227 /* Return a positive integer if X should sort after Y. The value
4228 returned is 1 if and only if X and Y are both regs. */
4230 static int
4231 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4233 int result;
4235 result = (commutative_operand_precedence (y)
4236 - commutative_operand_precedence (x));
4237 if (result)
4238 return result + result;
4240 /* Group together equal REGs to do more simplification. */
4241 if (REG_P (x) && REG_P (y))
4242 return REGNO (x) > REGNO (y);
4244 return 0;
4247 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4248 operands may be another PLUS or MINUS.
4250 Rather than test for specific cases, we do this by a brute-force method
4251 and do all possible simplifications until no more changes occur. Then
4252 we rebuild the operation.
4254 May return NULL_RTX when no changes were made. */
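/* Worked example (illustrative, with hypothetical pseudo registers):
   simplifying (plus (minus a b) (plus c (neg a))) expands into the operand
   list { +a, -b, +c, -a }; the +a/-a pair cancels during the pairwise
   combination loop, and the result is rebuilt as (minus c b).  */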
4256 static rtx
4257 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4258 rtx op1)
4260 struct simplify_plus_minus_op_data
4262 rtx op;
4263 short neg;
4264 } ops[16];
4265 rtx result, tem;
4266 int n_ops = 2;
4267 int changed, n_constants, canonicalized = 0;
4268 int i, j;
4270 memset (ops, 0, sizeof ops);
4272 /* Set up the two operands and then expand them until nothing has been
4273 changed. If we run out of room in our array, give up; this should
4274 almost never happen. */
4276 ops[0].op = op0;
4277 ops[0].neg = 0;
4278 ops[1].op = op1;
4279 ops[1].neg = (code == MINUS);
4283 changed = 0;
4284 n_constants = 0;
4286 for (i = 0; i < n_ops; i++)
4288 rtx this_op = ops[i].op;
4289 int this_neg = ops[i].neg;
4290 enum rtx_code this_code = GET_CODE (this_op);
4292 switch (this_code)
4294 case PLUS:
4295 case MINUS:
4296 if (n_ops == ARRAY_SIZE (ops))
4297 return NULL_RTX;
4299 ops[n_ops].op = XEXP (this_op, 1);
4300 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4301 n_ops++;
4303 ops[i].op = XEXP (this_op, 0);
4304 changed = 1;
4305 /* If this operand was negated then we will potentially
4306 canonicalize the expression. Similarly, if we don't
4307 place the operands adjacent, we're re-ordering the
4308 expression and thus might be performing a
4309 canonicalization. Ignore register re-ordering.
4310 ??? It might be better to shuffle the ops array here,
4311 but then (plus (plus (A, B), plus (C, D))) wouldn't
4312 be seen as non-canonical. */
4313 if (this_neg
4314 || (i != n_ops - 2
4315 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4316 canonicalized = 1;
4317 break;
4319 case NEG:
4320 ops[i].op = XEXP (this_op, 0);
4321 ops[i].neg = ! this_neg;
4322 changed = 1;
4323 canonicalized = 1;
4324 break;
4326 case CONST:
4327 if (n_ops != ARRAY_SIZE (ops)
4328 && GET_CODE (XEXP (this_op, 0)) == PLUS
4329 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4330 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4332 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4333 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4334 ops[n_ops].neg = this_neg;
4335 n_ops++;
4336 changed = 1;
4337 canonicalized = 1;
4339 break;
4341 case NOT:
4342 /* ~a -> (-a - 1) */
4343 if (n_ops != ARRAY_SIZE (ops))
4345 ops[n_ops].op = CONSTM1_RTX (mode);
4346 ops[n_ops++].neg = this_neg;
4347 ops[i].op = XEXP (this_op, 0);
4348 ops[i].neg = !this_neg;
4349 changed = 1;
4350 canonicalized = 1;
4352 break;
4354 case CONST_INT:
4355 n_constants++;
4356 if (this_neg)
4358 ops[i].op = neg_const_int (mode, this_op);
4359 ops[i].neg = 0;
4360 changed = 1;
4361 canonicalized = 1;
4363 break;
4365 default:
4366 break;
4370 while (changed);
4372 if (n_constants > 1)
4373 canonicalized = 1;
4375 gcc_assert (n_ops >= 2);
4377 /* If we only have two operands, we can avoid the loops. */
4378 if (n_ops == 2)
4380 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4381 rtx lhs, rhs;
4383 /* Get the two operands. Be careful with the order, especially for
4384 the cases where code == MINUS. */
4385 if (ops[0].neg && ops[1].neg)
4387 lhs = gen_rtx_NEG (mode, ops[0].op);
4388 rhs = ops[1].op;
4390 else if (ops[0].neg)
4392 lhs = ops[1].op;
4393 rhs = ops[0].op;
4395 else
4397 lhs = ops[0].op;
4398 rhs = ops[1].op;
4401 return simplify_const_binary_operation (code, mode, lhs, rhs);
4404 /* Now simplify each pair of operands until nothing changes. */
4405 while (1)
4407 /* Insertion sort is good enough for a small array. */
4408 for (i = 1; i < n_ops; i++)
4410 struct simplify_plus_minus_op_data save;
4411 int cmp;
4413 j = i - 1;
4414 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4415 if (cmp <= 0)
4416 continue;
4417 /* Just swapping registers doesn't count as canonicalization. */
4418 if (cmp != 1)
4419 canonicalized = 1;
4421 save = ops[i];
4423 ops[j + 1] = ops[j];
4424 while (j--
4425 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4426 ops[j + 1] = save;
4429 changed = 0;
4430 for (i = n_ops - 1; i > 0; i--)
4431 for (j = i - 1; j >= 0; j--)
4433 rtx lhs = ops[j].op, rhs = ops[i].op;
4434 int lneg = ops[j].neg, rneg = ops[i].neg;
4436 if (lhs != 0 && rhs != 0)
4438 enum rtx_code ncode = PLUS;
4440 if (lneg != rneg)
4442 ncode = MINUS;
4443 if (lneg)
4444 std::swap (lhs, rhs);
4446 else if (swap_commutative_operands_p (lhs, rhs))
4447 std::swap (lhs, rhs);
4449 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4450 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4452 rtx tem_lhs, tem_rhs;
4454 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4455 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4456 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4457 tem_rhs);
4459 if (tem && !CONSTANT_P (tem))
4460 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4462 else
4463 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4465 if (tem)
4467 /* Reject "simplifications" that just wrap the two
4468 arguments in a CONST. Failure to do so can result
4469 in infinite recursion with simplify_binary_operation
4470 when it calls us to simplify CONST operations.
4471 Also, if we find such a simplification, don't try
4472 any more combinations with this rhs: We must have
4473 something like symbol+offset, i.e. one of the
4474 trivial CONST expressions we handle later. */
4475 if (GET_CODE (tem) == CONST
4476 && GET_CODE (XEXP (tem, 0)) == ncode
4477 && XEXP (XEXP (tem, 0), 0) == lhs
4478 && XEXP (XEXP (tem, 0), 1) == rhs)
4479 break;
4480 lneg &= rneg;
4481 if (GET_CODE (tem) == NEG)
4482 tem = XEXP (tem, 0), lneg = !lneg;
4483 if (CONST_INT_P (tem) && lneg)
4484 tem = neg_const_int (mode, tem), lneg = 0;
4486 ops[i].op = tem;
4487 ops[i].neg = lneg;
4488 ops[j].op = NULL_RTX;
4489 changed = 1;
4490 canonicalized = 1;
4495 if (!changed)
4496 break;
4498 /* Pack all the operands to the lower-numbered entries. */
4499 for (i = 0, j = 0; j < n_ops; j++)
4500 if (ops[j].op)
4502 ops[i] = ops[j];
4503 i++;
4505 n_ops = i;
4508 /* If nothing changed, check that rematerialization of rtl instructions
4509 is still required. */
4510 if (!canonicalized)
4512 /* Perform rematerialization only if all operands are registers and
4513 all operations are PLUS. */
4514 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4515 around rs6000 and how it uses the CA register. See PR67145. */
4516 for (i = 0; i < n_ops; i++)
4517 if (ops[i].neg
4518 || !REG_P (ops[i].op)
4519 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4520 && fixed_regs[REGNO (ops[i].op)]
4521 && !global_regs[REGNO (ops[i].op)]
4522 && ops[i].op != frame_pointer_rtx
4523 && ops[i].op != arg_pointer_rtx
4524 && ops[i].op != stack_pointer_rtx))
4525 return NULL_RTX;
4526 goto gen_result;
4529 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4530 if (n_ops == 2
4531 && CONST_INT_P (ops[1].op)
4532 && CONSTANT_P (ops[0].op)
4533 && ops[0].neg)
4534 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4536 /* We suppressed creation of trivial CONST expressions in the
4537 combination loop to avoid recursion. Create one manually now.
4538 The combination loop should have ensured that there is exactly
4539 one CONST_INT, and the sort will have ensured that it is last
4540 in the array and that any other constant will be next-to-last. */
4542 if (n_ops > 1
4543 && CONST_INT_P (ops[n_ops - 1].op)
4544 && CONSTANT_P (ops[n_ops - 2].op))
4546 rtx value = ops[n_ops - 1].op;
4547 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4548 value = neg_const_int (mode, value);
4549 if (CONST_INT_P (value))
4551 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4552 INTVAL (value));
4553 n_ops--;
4557 /* Put a non-negated operand first, if possible. */
4559 for (i = 0; i < n_ops && ops[i].neg; i++)
4560 continue;
4561 if (i == n_ops)
4562 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4563 else if (i != 0)
4565 tem = ops[0].op;
4566 ops[0] = ops[i];
4567 ops[i].op = tem;
4568 ops[i].neg = 1;
4571 /* Now make the result by performing the requested operations. */
4572 gen_result:
4573 result = ops[0].op;
4574 for (i = 1; i < n_ops; i++)
4575 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4576 mode, result, ops[i].op);
4578 return result;
4581 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4582 static bool
4583 plus_minus_operand_p (const_rtx x)
4585 return GET_CODE (x) == PLUS
4586 || GET_CODE (x) == MINUS
4587 || (GET_CODE (x) == CONST
4588 && GET_CODE (XEXP (x, 0)) == PLUS
4589 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4590 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4593 /* Like simplify_binary_operation except used for relational operators.
4594 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4595 not both be VOIDmode as well.
4597 CMP_MODE specifies the mode in which the comparison is done, so it is
4598 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4599 the operands or, if both are VOIDmode, the operands are compared in
4600 "infinite precision". */
4602 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4603 machine_mode cmp_mode, rtx op0, rtx op1)
4605 rtx tem, trueop0, trueop1;
4607 if (cmp_mode == VOIDmode)
4608 cmp_mode = GET_MODE (op0);
4609 if (cmp_mode == VOIDmode)
4610 cmp_mode = GET_MODE (op1);
4612 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4613 if (tem)
4615 if (SCALAR_FLOAT_MODE_P (mode))
4617 if (tem == const0_rtx)
4618 return CONST0_RTX (mode);
4619 #ifdef FLOAT_STORE_FLAG_VALUE
4621 REAL_VALUE_TYPE val;
4622 val = FLOAT_STORE_FLAG_VALUE (mode);
4623 return const_double_from_real_value (val, mode);
4625 #else
4626 return NULL_RTX;
4627 #endif
4629 if (VECTOR_MODE_P (mode))
4631 if (tem == const0_rtx)
4632 return CONST0_RTX (mode);
4633 #ifdef VECTOR_STORE_FLAG_VALUE
4635 int i, units;
4636 rtvec v;
4638 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4639 if (val == NULL_RTX)
4640 return NULL_RTX;
4641 if (val == const1_rtx)
4642 return CONST1_RTX (mode);
4644 units = GET_MODE_NUNITS (mode);
4645 v = rtvec_alloc (units);
4646 for (i = 0; i < units; i++)
4647 RTVEC_ELT (v, i) = val;
4648 return gen_rtx_raw_CONST_VECTOR (mode, v);
4650 #else
4651 return NULL_RTX;
4652 #endif
4655 return tem;
4658 /* For the following tests, ensure const0_rtx is op1. */
4659 if (swap_commutative_operands_p (op0, op1)
4660 || (op0 == const0_rtx && op1 != const0_rtx))
4661 std::swap (op0, op1), code = swap_condition (code);
4663 /* If op0 is a compare, extract the comparison arguments from it. */
4664 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4665 return simplify_gen_relational (code, mode, VOIDmode,
4666 XEXP (op0, 0), XEXP (op0, 1));
4668 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4669 || CC0_P (op0))
4670 return NULL_RTX;
4672 trueop0 = avoid_constant_pool_reference (op0);
4673 trueop1 = avoid_constant_pool_reference (op1);
4674 return simplify_relational_operation_1 (code, mode, cmp_mode,
4675 trueop0, trueop1);
4678 /* This part of simplify_relational_operation is only used when CMP_MODE
4679 is not in class MODE_CC (i.e. it is a real comparison).
4681 MODE is the mode of the result, while CMP_MODE specifies the mode in
4682 which the comparison is done, so it is the mode of the operands. */
4684 static rtx
4685 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4686 machine_mode cmp_mode, rtx op0, rtx op1)
4688 enum rtx_code op0code = GET_CODE (op0);
4690 if (op1 == const0_rtx && COMPARISON_P (op0))
4692 /* If op0 is a comparison, extract the comparison arguments
4693 from it. */
4694 if (code == NE)
4696 if (GET_MODE (op0) == mode)
4697 return simplify_rtx (op0);
4698 else
4699 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4700 XEXP (op0, 0), XEXP (op0, 1));
4702 else if (code == EQ)
4704 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4705 if (new_code != UNKNOWN)
4706 return simplify_gen_relational (new_code, mode, VOIDmode,
4707 XEXP (op0, 0), XEXP (op0, 1));
4711 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4712 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
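/* Concrete instance (illustrative): (ltu:SI (plus:SI a (const_int 4))
   (const_int 4)) tests whether the addition wrapped around, which is
   equivalent to (geu:SI a (const_int -4)).  */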
4713 if ((code == LTU || code == GEU)
4714 && GET_CODE (op0) == PLUS
4715 && CONST_INT_P (XEXP (op0, 1))
4716 && (rtx_equal_p (op1, XEXP (op0, 0))
4717 || rtx_equal_p (op1, XEXP (op0, 1)))
4718 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4719 && XEXP (op0, 1) != const0_rtx)
4721 rtx new_cmp
4722 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4723 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4724 cmp_mode, XEXP (op0, 0), new_cmp);
4727 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4728 transformed into (LTU a -C). */
4729 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4730 && CONST_INT_P (XEXP (op0, 1))
4731 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4732 && XEXP (op0, 1) != const0_rtx)
4734 rtx new_cmp
4735 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4736 return simplify_gen_relational (LTU, mode, cmp_mode,
4737 XEXP (op0, 0), new_cmp);
4740 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4741 if ((code == LTU || code == GEU)
4742 && GET_CODE (op0) == PLUS
4743 && rtx_equal_p (op1, XEXP (op0, 1))
4744 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4745 && !rtx_equal_p (op1, XEXP (op0, 0)))
4746 return simplify_gen_relational (code, mode, cmp_mode, op0,
4747 copy_rtx (XEXP (op0, 0)));
4749 if (op1 == const0_rtx)
4751 /* Canonicalize (GTU x 0) as (NE x 0). */
4752 if (code == GTU)
4753 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4754 /* Canonicalize (LEU x 0) as (EQ x 0). */
4755 if (code == LEU)
4756 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4758 else if (op1 == const1_rtx)
4760 switch (code)
4762 case GE:
4763 /* Canonicalize (GE x 1) as (GT x 0). */
4764 return simplify_gen_relational (GT, mode, cmp_mode,
4765 op0, const0_rtx);
4766 case GEU:
4767 /* Canonicalize (GEU x 1) as (NE x 0). */
4768 return simplify_gen_relational (NE, mode, cmp_mode,
4769 op0, const0_rtx);
4770 case LT:
4771 /* Canonicalize (LT x 1) as (LE x 0). */
4772 return simplify_gen_relational (LE, mode, cmp_mode,
4773 op0, const0_rtx);
4774 case LTU:
4775 /* Canonicalize (LTU x 1) as (EQ x 0). */
4776 return simplify_gen_relational (EQ, mode, cmp_mode,
4777 op0, const0_rtx);
4778 default:
4779 break;
4782 else if (op1 == constm1_rtx)
4784 /* Canonicalize (LE x -1) as (LT x 0). */
4785 if (code == LE)
4786 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4787 /* Canonicalize (GT x -1) as (GE x 0). */
4788 if (code == GT)
4789 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4792 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
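/* For example (illustrative): (eq:SI (plus:SI x (const_int 3)) (const_int 10))
   simplifies to (eq:SI x (const_int 7)).  */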
4793 if ((code == EQ || code == NE)
4794 && (op0code == PLUS || op0code == MINUS)
4795 && CONSTANT_P (op1)
4796 && CONSTANT_P (XEXP (op0, 1))
4797 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4799 rtx x = XEXP (op0, 0);
4800 rtx c = XEXP (op0, 1);
4801 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4802 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4804 /* Detect an infinite recursive condition, where we oscillate at this
4805 simplification case between:
4806 A + B == C <---> C - B == A,
4807 where A, B, and C are all constants with non-simplifiable expressions,
4808 usually SYMBOL_REFs. */
4809 if (GET_CODE (tem) == invcode
4810 && CONSTANT_P (x)
4811 && rtx_equal_p (c, XEXP (tem, 1)))
4812 return NULL_RTX;
4814 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4817 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4818 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4819 scalar_int_mode int_mode, int_cmp_mode;
4820 if (code == NE
4821 && op1 == const0_rtx
4822 && is_int_mode (mode, &int_mode)
4823 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4824 /* ??? Work-around BImode bugs in the ia64 backend. */
4825 && int_mode != BImode
4826 && int_cmp_mode != BImode
4827 && nonzero_bits (op0, int_cmp_mode) == 1
4828 && STORE_FLAG_VALUE == 1)
4829 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4830 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4831 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4833 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4834 if ((code == EQ || code == NE)
4835 && op1 == const0_rtx
4836 && op0code == XOR)
4837 return simplify_gen_relational (code, mode, cmp_mode,
4838 XEXP (op0, 0), XEXP (op0, 1));
4840 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4841 if ((code == EQ || code == NE)
4842 && op0code == XOR
4843 && rtx_equal_p (XEXP (op0, 0), op1)
4844 && !side_effects_p (XEXP (op0, 0)))
4845 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4846 CONST0_RTX (mode));
4848 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4849 if ((code == EQ || code == NE)
4850 && op0code == XOR
4851 && rtx_equal_p (XEXP (op0, 1), op1)
4852 && !side_effects_p (XEXP (op0, 1)))
4853 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4854 CONST0_RTX (mode));
4856 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4857 if ((code == EQ || code == NE)
4858 && op0code == XOR
4859 && CONST_SCALAR_INT_P (op1)
4860 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4861 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4862 simplify_gen_binary (XOR, cmp_mode,
4863 XEXP (op0, 1), op1));
4865 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4866 can be implemented with a BICS instruction on some targets, or
4867 constant-folded if y is a constant. */
4868 if ((code == EQ || code == NE)
4869 && op0code == AND
4870 && rtx_equal_p (XEXP (op0, 0), op1)
4871 && !side_effects_p (op1)
4872 && op1 != CONST0_RTX (cmp_mode))
4874 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4875 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4877 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4878 CONST0_RTX (cmp_mode));
4881 /* Likewise for (eq/ne (and x y) y). */
4882 if ((code == EQ || code == NE)
4883 && op0code == AND
4884 && rtx_equal_p (XEXP (op0, 1), op1)
4885 && !side_effects_p (op1)
4886 && op1 != CONST0_RTX (cmp_mode))
4888 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4889 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4891 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4892 CONST0_RTX (cmp_mode));
4895 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4896 if ((code == EQ || code == NE)
4897 && GET_CODE (op0) == BSWAP
4898 && CONST_SCALAR_INT_P (op1))
4899 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4900 simplify_gen_unary (BSWAP, cmp_mode,
4901 op1, cmp_mode));
4903 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4904 if ((code == EQ || code == NE)
4905 && GET_CODE (op0) == BSWAP
4906 && GET_CODE (op1) == BSWAP)
4907 return simplify_gen_relational (code, mode, cmp_mode,
4908 XEXP (op0, 0), XEXP (op1, 0));
4910 if (op0code == POPCOUNT && op1 == const0_rtx)
4911 switch (code)
4913 case EQ:
4914 case LE:
4915 case LEU:
4916 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4917 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4918 XEXP (op0, 0), const0_rtx);
4920 case NE:
4921 case GT:
4922 case GTU:
4923 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4924 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4925 XEXP (op0, 0), const0_rtx);
4927 default:
4928 break;
4931 return NULL_RTX;
4934 enum
4936 CMP_EQ = 1,
4937 CMP_LT = 2,
4938 CMP_GT = 4,
4939 CMP_LTU = 8,
4940 CMP_GTU = 16
4944 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4945 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4946 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4947 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4948 For floating-point comparisons, assume that the operands were ordered. */
4950 static rtx
4951 comparison_result (enum rtx_code code, int known_results)
4953 switch (code)
4955 case EQ:
4956 case UNEQ:
4957 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4958 case NE:
4959 case LTGT:
4960 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4962 case LT:
4963 case UNLT:
4964 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4965 case GE:
4966 case UNGE:
4967 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4969 case GT:
4970 case UNGT:
4971 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4972 case LE:
4973 case UNLE:
4974 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4976 case LTU:
4977 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4978 case GEU:
4979 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4981 case GTU:
4982 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4983 case LEU:
4984 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4986 case ORDERED:
4987 return const_true_rtx;
4988 case UNORDERED:
4989 return const0_rtx;
4990 default:
4991 gcc_unreachable ();
4995 /* Check if the given comparison (done in the given MODE) is actually
4996 a tautology or a contradiction. If the mode is VOIDmode, the
4997 comparison is done in "infinite precision". If no simplification
4998 is possible, this function returns zero. Otherwise, it returns
4999 either const_true_rtx or const0_rtx. */
5002 simplify_const_relational_operation (enum rtx_code code,
5003 machine_mode mode,
5004 rtx op0, rtx op1)
5006 rtx tem;
5007 rtx trueop0;
5008 rtx trueop1;
5010 gcc_assert (mode != VOIDmode
5011 || (GET_MODE (op0) == VOIDmode
5012 && GET_MODE (op1) == VOIDmode));
5014 /* If op0 is a compare, extract the comparison arguments from it. */
5015 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5017 op1 = XEXP (op0, 1);
5018 op0 = XEXP (op0, 0);
5020 if (GET_MODE (op0) != VOIDmode)
5021 mode = GET_MODE (op0);
5022 else if (GET_MODE (op1) != VOIDmode)
5023 mode = GET_MODE (op1);
5024 else
5025 return 0;
5028 /* We can't simplify MODE_CC values since we don't know what the
5029 actual comparison is. */
5030 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5031 return 0;
5033 /* Make sure the constant is second. */
5034 if (swap_commutative_operands_p (op0, op1))
5036 std::swap (op0, op1);
5037 code = swap_condition (code);
5040 trueop0 = avoid_constant_pool_reference (op0);
5041 trueop1 = avoid_constant_pool_reference (op1);
5043 /* For integer comparisons of A and B, maybe we can simplify A - B and can
5044 then simplify a comparison of that with zero. If A and B are both either
5045 a register or a CONST_INT, this can't help; testing for these cases will
5046 prevent infinite recursion here and speed things up.
5048 We can only do this for EQ and NE comparisons, as otherwise we may
5049 lose or introduce overflow that we cannot disregard as undefined, since
5050 we do not know the signedness of the operation on either the left or
5051 the right-hand side of the comparison. */
5053 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5054 && (code == EQ || code == NE)
5055 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5056 && (REG_P (op1) || CONST_INT_P (trueop1)))
5057 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5058 /* We cannot do this if tem is a nonzero address. */
5059 && ! nonzero_address_p (tem))
5060 return simplify_const_relational_operation (signed_condition (code),
5061 mode, tem, const0_rtx);
5063 if (! HONOR_NANS (mode) && code == ORDERED)
5064 return const_true_rtx;
5066 if (! HONOR_NANS (mode) && code == UNORDERED)
5067 return const0_rtx;
5069 /* For modes without NaNs, if the two operands are equal, we know the
5070 result except if they have side-effects. Even with NaNs we know
5071 the result of unordered comparisons and, if signaling NaNs are
5072 irrelevant, also the result of LT/GT/LTGT. */
5073 if ((! HONOR_NANS (trueop0)
5074 || code == UNEQ || code == UNLE || code == UNGE
5075 || ((code == LT || code == GT || code == LTGT)
5076 && ! HONOR_SNANS (trueop0)))
5077 && rtx_equal_p (trueop0, trueop1)
5078 && ! side_effects_p (trueop0))
5079 return comparison_result (code, CMP_EQ);
5081 /* If the operands are floating-point constants, see if we can fold
5082 the result. */
5083 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5084 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5085 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5087 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5088 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5090 /* Comparisons are unordered iff at least one of the values is NaN. */
5091 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5092 switch (code)
5094 case UNEQ:
5095 case UNLT:
5096 case UNGT:
5097 case UNLE:
5098 case UNGE:
5099 case NE:
5100 case UNORDERED:
5101 return const_true_rtx;
5102 case EQ:
5103 case LT:
5104 case GT:
5105 case LE:
5106 case GE:
5107 case LTGT:
5108 case ORDERED:
5109 return const0_rtx;
5110 default:
5111 return 0;
5114 return comparison_result (code,
5115 (real_equal (d0, d1) ? CMP_EQ :
5116 real_less (d0, d1) ? CMP_LT : CMP_GT));
5119 /* Otherwise, see if the operands are both integers. */
5120 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5121 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5123 /* It would be nice if we really had a mode here. However, the
5124 largest int representable on the target is as good as
5125 infinite. */
5126 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5127 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5128 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5130 if (wi::eq_p (ptrueop0, ptrueop1))
5131 return comparison_result (code, CMP_EQ);
5132 else
5134 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5135 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5136 return comparison_result (code, cr);
5140 /* Optimize comparisons with upper and lower bounds. */
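/* Illustrative instance: if nonzero_bits shows that trueop0 fits in [0, 15]
   (say it was masked with 0xf), then (leu trueop0 (const_int 15)) folds to
   const_true_rtx and (gtu trueop0 (const_int 15)) folds to const0_rtx,
   since 15 is already >= the upper bound.  */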
5141 scalar_int_mode int_mode;
5142 if (CONST_INT_P (trueop1)
5143 && is_a <scalar_int_mode> (mode, &int_mode)
5144 && HWI_COMPUTABLE_MODE_P (int_mode)
5145 && !side_effects_p (trueop0))
5147 int sign;
5148 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5149 HOST_WIDE_INT val = INTVAL (trueop1);
5150 HOST_WIDE_INT mmin, mmax;
5152 if (code == GEU
5153 || code == LEU
5154 || code == GTU
5155 || code == LTU)
5156 sign = 0;
5157 else
5158 sign = 1;
5160 /* Get a reduced range if the sign bit is zero. */
5161 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5163 mmin = 0;
5164 mmax = nonzero;
5166 else
5168 rtx mmin_rtx, mmax_rtx;
5169 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5171 mmin = INTVAL (mmin_rtx);
5172 mmax = INTVAL (mmax_rtx);
5173 if (sign)
5175 unsigned int sign_copies
5176 = num_sign_bit_copies (trueop0, int_mode);
5178 mmin >>= (sign_copies - 1);
5179 mmax >>= (sign_copies - 1);
5183 switch (code)
5185 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5186 case GEU:
5187 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5188 return const_true_rtx;
5189 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5190 return const0_rtx;
5191 break;
5192 case GE:
5193 if (val <= mmin)
5194 return const_true_rtx;
5195 if (val > mmax)
5196 return const0_rtx;
5197 break;
5199 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5200 case LEU:
5201 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5202 return const_true_rtx;
5203 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5204 return const0_rtx;
5205 break;
5206 case LE:
5207 if (val >= mmax)
5208 return const_true_rtx;
5209 if (val < mmin)
5210 return const0_rtx;
5211 break;
5213 case EQ:
5214 /* x == y is always false for y out of range. */
5215 if (val < mmin || val > mmax)
5216 return const0_rtx;
5217 break;
5219 /* x > y is always false for y >= mmax, always true for y < mmin. */
5220 case GTU:
5221 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5222 return const0_rtx;
5223 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5224 return const_true_rtx;
5225 break;
5226 case GT:
5227 if (val >= mmax)
5228 return const0_rtx;
5229 if (val < mmin)
5230 return const_true_rtx;
5231 break;
5233 /* x < y is always false for y <= mmin, always true for y > mmax. */
5234 case LTU:
5235 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5236 return const0_rtx;
5237 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5238 return const_true_rtx;
5239 break;
5240 case LT:
5241 if (val <= mmin)
5242 return const0_rtx;
5243 if (val > mmax)
5244 return const_true_rtx;
5245 break;
5247 case NE:
5248 /* x != y is always true for y out of range. */
5249 if (val < mmin || val > mmax)
5250 return const_true_rtx;
5251 break;
5253 default:
5254 break;
5258 /* Optimize integer comparisons with zero. */
5259 if (is_a <scalar_int_mode> (mode, &int_mode)
5260 && trueop1 == const0_rtx
5261 && !side_effects_p (trueop0))
5263 /* Some addresses are known to be nonzero. We don't know
5264 their sign, but equality comparisons are known. */
5265 if (nonzero_address_p (trueop0))
5267 if (code == EQ || code == LEU)
5268 return const0_rtx;
5269 if (code == NE || code == GTU)
5270 return const_true_rtx;
5273 /* See if the first operand is an IOR with a constant. If so, we
5274 may be able to determine the result of this comparison. */
5275 if (GET_CODE (op0) == IOR)
5277 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5278 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5280 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5281 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5282 && (UINTVAL (inner_const)
5283 & (HOST_WIDE_INT_1U
5284 << sign_bitnum)));
5286 switch (code)
5288 case EQ:
5289 case LEU:
5290 return const0_rtx;
5291 case NE:
5292 case GTU:
5293 return const_true_rtx;
5294 case LT:
5295 case LE:
5296 if (has_sign)
5297 return const_true_rtx;
5298 break;
5299 case GT:
5300 case GE:
5301 if (has_sign)
5302 return const0_rtx;
5303 break;
5304 default:
5305 break;
5311 /* Optimize comparison of ABS with zero. */
5312 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5313 && (GET_CODE (trueop0) == ABS
5314 || (GET_CODE (trueop0) == FLOAT_EXTEND
5315 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5317 switch (code)
5319 case LT:
5320 /* Optimize abs(x) < 0.0. */
5321 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5322 return const0_rtx;
5323 break;
5325 case GE:
5326 /* Optimize abs(x) >= 0.0. */
5327 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5328 return const_true_rtx;
5329 break;
5331 case UNGE:
5332 /* Optimize ! (abs(x) < 0.0). */
5333 return const_true_rtx;
5335 default:
5336 break;
5340 return 0;
5343 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5344 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5345 or CTZ_DEFINED_VALUE_AT_ZERO, respectively, and return OP (X) if the
5346 expression can be simplified to that, or NULL_RTX if not.
5347 Assume X is compared against zero with CMP_CODE and the true
5348 arm is TRUE_VAL and the false arm is FALSE_VAL. */
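/* Illustrative example: if CLZ_DEFINED_VALUE_AT_ZERO gives 32 for SImode on
   the target, then the RTL for "x == 0 ? 32 : clz (x)" reduces to plain
   (clz:SI x), because the conditional only supplies the value clz already
   produces for zero.  */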
5350 static rtx
5351 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5353 if (cmp_code != EQ && cmp_code != NE)
5354 return NULL_RTX;
5356 /* Result on X == 0 and X != 0, respectively. */
5357 rtx on_zero, on_nonzero;
5358 if (cmp_code == EQ)
5360 on_zero = true_val;
5361 on_nonzero = false_val;
5363 else
5365 on_zero = false_val;
5366 on_nonzero = true_val;
5369 rtx_code op_code = GET_CODE (on_nonzero);
5370 if ((op_code != CLZ && op_code != CTZ)
5371 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5372 || !CONST_INT_P (on_zero))
5373 return NULL_RTX;
5375 HOST_WIDE_INT op_val;
5376 scalar_int_mode mode ATTRIBUTE_UNUSED
5377 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5378 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5379 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5380 && op_val == INTVAL (on_zero))
5381 return on_nonzero;
5383 return NULL_RTX;
5387 /* Simplify CODE, an operation with result mode MODE and three operands,
5388 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5389 a constant. Return 0 if no simplification is possible. */
5392 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5393 machine_mode op0_mode, rtx op0, rtx op1,
5394 rtx op2)
5396 bool any_change = false;
5397 rtx tem, trueop2;
5398 scalar_int_mode int_mode, int_op0_mode;
5400 switch (code)
5402 case FMA:
5403 /* Simplify negations around the multiplication. */
5404 /* -a * -b + c => a * b + c. */
5405 if (GET_CODE (op0) == NEG)
5407 tem = simplify_unary_operation (NEG, mode, op1, mode);
5408 if (tem)
5409 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5411 else if (GET_CODE (op1) == NEG)
5413 tem = simplify_unary_operation (NEG, mode, op0, mode);
5414 if (tem)
5415 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5418 /* Canonicalize the two multiplication operands. */
5419 /* a * -b + c => -b * a + c. */
5420 if (swap_commutative_operands_p (op0, op1))
5421 std::swap (op0, op1), any_change = true;
5423 if (any_change)
5424 return gen_rtx_FMA (mode, op0, op1, op2);
5425 return NULL_RTX;
5427 case SIGN_EXTRACT:
5428 case ZERO_EXTRACT:
5429 if (CONST_INT_P (op0)
5430 && CONST_INT_P (op1)
5431 && CONST_INT_P (op2)
5432 && is_a <scalar_int_mode> (mode, &int_mode)
5433 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5434 && HWI_COMPUTABLE_MODE_P (int_mode))
5436 /* Extracting a bit-field from a constant. */
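/* Worked example (illustrative, assuming !BITS_BIG_ENDIAN): extracting 3 bits
   at position 2 from the constant 0xb5 (binary 10110101) first shifts right
   by 2 to get 0x2d, then masks to 3 bits giving 5 (binary 101); a
   SIGN_EXTRACT sees the top extracted bit set and sign-extends the result
   to -3.  */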
5437 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5438 HOST_WIDE_INT op1val = INTVAL (op1);
5439 HOST_WIDE_INT op2val = INTVAL (op2);
5440 if (!BITS_BIG_ENDIAN)
5441 val >>= op2val;
5442 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5443 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5444 else
5445 /* Not enough information to calculate the bit position. */
5446 break;
5448 if (HOST_BITS_PER_WIDE_INT != op1val)
5450 /* First zero-extend. */
5451 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5452 /* If desired, propagate sign bit. */
5453 if (code == SIGN_EXTRACT
5454 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5455 != 0)
5456 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5459 return gen_int_mode (val, int_mode);
5461 break;
5463 case IF_THEN_ELSE:
5464 if (CONST_INT_P (op0))
5465 return op0 != const0_rtx ? op1 : op2;
5467 /* Convert c ? a : a into "a". */
5468 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5469 return op1;
5471 /* Convert a != b ? a : b into "a". */
5472 if (GET_CODE (op0) == NE
5473 && ! side_effects_p (op0)
5474 && ! HONOR_NANS (mode)
5475 && ! HONOR_SIGNED_ZEROS (mode)
5476 && ((rtx_equal_p (XEXP (op0, 0), op1)
5477 && rtx_equal_p (XEXP (op0, 1), op2))
5478 || (rtx_equal_p (XEXP (op0, 0), op2)
5479 && rtx_equal_p (XEXP (op0, 1), op1))))
5480 return op1;
5482 /* Convert a == b ? a : b into "b". */
5483 if (GET_CODE (op0) == EQ
5484 && ! side_effects_p (op0)
5485 && ! HONOR_NANS (mode)
5486 && ! HONOR_SIGNED_ZEROS (mode)
5487 && ((rtx_equal_p (XEXP (op0, 0), op1)
5488 && rtx_equal_p (XEXP (op0, 1), op2))
5489 || (rtx_equal_p (XEXP (op0, 0), op2)
5490 && rtx_equal_p (XEXP (op0, 1), op1))))
5491 return op2;
5493 /* Convert (!c) != {0,...,0} ? a : b into
5494 c != {0,...,0} ? b : a for vector modes. */
5495 if (VECTOR_MODE_P (GET_MODE (op1))
5496 && GET_CODE (op0) == NE
5497 && GET_CODE (XEXP (op0, 0)) == NOT
5498 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5500 rtx cv = XEXP (op0, 1);
5501 int nunits = CONST_VECTOR_NUNITS (cv);
5502 bool ok = true;
5503 for (int i = 0; i < nunits; ++i)
5504 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5506 ok = false;
5507 break;
5509 if (ok)
5511 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5512 XEXP (XEXP (op0, 0), 0),
5513 XEXP (op0, 1));
5514 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5515 return retval;
5519 /* Convert x == 0 ? N : clz (x) into clz (x) when
5520 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5521 Similarly for ctz (x). */
5522 if (COMPARISON_P (op0) && !side_effects_p (op0)
5523 && XEXP (op0, 1) == const0_rtx)
5525 rtx simplified
5526 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5527 op1, op2);
5528 if (simplified)
5529 return simplified;
5532 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5534 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5535 ? GET_MODE (XEXP (op0, 1))
5536 : GET_MODE (XEXP (op0, 0)));
5537 rtx temp;
5539 /* Look for happy constants in op1 and op2. */
5540 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5542 HOST_WIDE_INT t = INTVAL (op1);
5543 HOST_WIDE_INT f = INTVAL (op2);
5545 if (t == STORE_FLAG_VALUE && f == 0)
5546 code = GET_CODE (op0);
5547 else if (t == 0 && f == STORE_FLAG_VALUE)
5549 enum rtx_code tmp;
5550 tmp = reversed_comparison_code (op0, NULL);
5551 if (tmp == UNKNOWN)
5552 break;
5553 code = tmp;
5555 else
5556 break;
5558 return simplify_gen_relational (code, mode, cmp_mode,
5559 XEXP (op0, 0), XEXP (op0, 1));
5562 if (cmp_mode == VOIDmode)
5563 cmp_mode = op0_mode;
5564 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5565 cmp_mode, XEXP (op0, 0),
5566 XEXP (op0, 1));
5568 /* See if any simplifications were possible. */
5569 if (temp)
5571 if (CONST_INT_P (temp))
5572 return temp == const0_rtx ? op2 : op1;
5573 else if (temp)
5574 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5577 break;
5579 case VEC_MERGE:
5580 gcc_assert (GET_MODE (op0) == mode);
5581 gcc_assert (GET_MODE (op1) == mode);
5582 gcc_assert (VECTOR_MODE_P (mode));
5583 trueop2 = avoid_constant_pool_reference (op2);
5584 if (CONST_INT_P (trueop2))
5586 int elt_size = GET_MODE_UNIT_SIZE (mode);
5587 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5588 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5589 unsigned HOST_WIDE_INT mask;
5590 if (n_elts == HOST_BITS_PER_WIDE_INT)
5591 mask = -1;
5592 else
5593 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5595 if (!(sel & mask) && !side_effects_p (op0))
5596 return op1;
5597 if ((sel & mask) == mask && !side_effects_p (op1))
5598 return op0;
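/* For example (illustrative): with a 4-element vector mode, mask is 0xf, so
   (vec_merge a b (const_int 0)) takes every element from b and simplifies
   to b, while (vec_merge a b (const_int 15)) takes every element from a and
   simplifies to a.  */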
5600 rtx trueop0 = avoid_constant_pool_reference (op0);
5601 rtx trueop1 = avoid_constant_pool_reference (op1);
5602 if (GET_CODE (trueop0) == CONST_VECTOR
5603 && GET_CODE (trueop1) == CONST_VECTOR)
5605 rtvec v = rtvec_alloc (n_elts);
5606 unsigned int i;
5608 for (i = 0; i < n_elts; i++)
5609 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5610 ? CONST_VECTOR_ELT (trueop0, i)
5611 : CONST_VECTOR_ELT (trueop1, i));
5612 return gen_rtx_CONST_VECTOR (mode, v);
5615 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5616 if no element from a appears in the result. */
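/* Illustrative instance: with 4 elements, if the inner mask m is 0xc (a
   supplies only elements 2 and 3) and the outer mask n is 0x3 (the outer
   merge keeps only elements 0 and 1 of the inner merge), then sel & sel0 is
   0, no element of a survives, and the expression becomes
   (vec_merge b c (const_int 3)).  */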
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }

          /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
             with a.  */
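          /* Such an expression reads element I of A, broadcasts it, and then
             merges only lane I back into A, so the result is A unchanged.  */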
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
              && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
              && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
            {
              tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
              if (CONST_INT_P (tem) && CONST_INT_P (op2))
                {
                  if (XEXP (XEXP (op0, 0), 0) == op1
                      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
                    return op1;
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
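/* For example, taking the HImode subreg at byte 0 of the SImode constant
   0x12345678 on a little-endian target unpacks the constant into the byte
   array {0x78, 0x56, 0x34, 0x12}, selects the two bytes at offset 0, and
   repacks them as (const_int 0x5678).  */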
static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
                       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  scalar_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_WIDE_INT:
          {
            rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
            unsigned char extend = wi::sign_mask (val);
            int prec = wi::get_precision (val);

            for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              scalar_float_mode el_mode;

              el_mode = as_a <scalar_float_mode> (GET_MODE (el));
              int bitsize = GET_MODE_BITSIZE (el_mode);

              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
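  /* For example, for the SImode subreg at byte 4 of a DImode value on a
     target where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN are set, byte 4
     names the least significant word, so BYTE becomes 0 below; on a
     little-endian target it stays 4.  */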
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
              return NULL_RTX;
            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
            /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
            if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
              return NULL_RTX;
#endif
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = const_double_from_real_value (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (machine_mode outermode, rtx op,
                 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
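  /* For example, on a little-endian target the nested subreg
     (subreg:HI (subreg:SI (reg:DI R) 0) 0) can be reduced to the single
     subreg (subreg:HI (reg:DI R) 0).  */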
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
         to the inner value of OP.  */
      HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
                                                       innermode, byte);
      HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
      HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          HOST_WIDE_INT required_offset
            = subreg_memory_offset (outermode, innermostmode, 0);
          if (final_offset != required_offset)
            return NULL_RTX;
          /* Paradoxical subregs always have byte offset 0.  */
          final_offset = 0;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
                                      subreg_memory_offset (outermode,
                                                            innermode, byte));

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis, which cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
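  /* For example, the QImode subreg at byte 3 of (mem:SI X) is rewritten as
     a QImode MEM whose address is X offset by 3 bytes.  */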
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
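  /* For example, (subreg:SF (concat:SC A B) 4), where A and B are SFmode,
     selects the second (imaginary) part and simplifies to B.  */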
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts bits higher than the ZERO_EXTEND's source provides.  */
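  /* For example, on a little-endian target the SImode subreg at byte 4 of
     (zero_extend:DI (reg:SI X)) reads only bits the extension filled with
     zeros, so it simplifies to (const_int 0).  */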
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && (GET_MODE_PRECISION (int_outermode)
          < GET_MODE_PRECISION (int_innermode))
      && byte == subreg_lowpart_offset (int_outermode, int_innermode))
    {
      rtx tem = simplify_truncation (int_outermode, op, int_innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
                     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */
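/* For example, lowpart_subreg (QImode, X, SImode) yields (subreg:QI X 0)
   on a little-endian target and (subreg:QI X 3) on a big-endian one
   (assuming the subreg does not fold further).  */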
rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
                machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
                              subreg_lowpart_offset (outer_mode, inner_mode));
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on the pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}