/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
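
/* Illustrative sketch (not from the original source): for a double-wide
   value stored as (low, high) HOST_WIDE_INT halves, sign extension of
   the low half is

     unsigned HOST_WIDE_INT low = get_low_half ();   hypothetical helper
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   so HIGH becomes HOST_WIDE_INT_M1 (all ones) exactly when LOW's sign
   bit is set, and HOST_WIDE_INT_0 otherwise.  */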

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
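
/* Illustrative example: in SImode, neg_const_int (SImode, GEN_INT (5))
   yields (const_int -5) via gen_int_mode.  The fallback above fires
   only when MODE is wider than a host wide int and negating the low
   half changes nothing (VAL == UINTVAL (I), e.g. a low half of zero),
   in which case the full-precision NEG must be computed instead.  */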

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
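
/* Illustrative examples: in QImode, (const_int -128) (bit pattern 0x80)
   satisfies mode_signbit_p, since after masking with the mode mask only
   the sign bit remains set; (const_int 64) does not, and the same
   constant tested in HImode fails because HImode's sign bit is 0x8000.  */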

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
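
/* Illustrative contrast between the three predicates above: for QImode
   and VAL == 0xf0, val_signbit_p is false (bits besides the sign bit
   are set), val_signbit_known_set_p is true (bit 7 is set), and
   val_signbit_known_clear_p is false.  */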

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
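
/* Usage sketch (hypothetical values): a caller combining two operands
   need not special-case constants, e.g.

     rtx sum = simplify_gen_binary (PLUS, SImode,
				    gen_int_mode (2, SImode),
				    gen_int_mode (3, SImode));

   folds to (const_int 5), while non-foldable operands come back as a
   freshly generated PLUS with any constant canonically second.  */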

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
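
/* Illustrative example (hypothetical pool entry): given
   x = (mem (symbol_ref C)) where C is a constant-pool entry holding an
   SFmode 1.0, this returns the (const_double 1.0) itself, letting later
   folding see through the pool reference; anything unrecognized comes
   back unchanged.  */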

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
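
/* Usage sketch (hypothetical REG): replacing a register by a constant
   and folding in one step:

     rtx x = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx y = simplify_replace_rtx (x, reg, GEN_INT (41));

   yields (const_int 42) rather than (plus (const_int 41) (const_int 1)),
   because each rebuilt subexpression goes through simplify_gen_binary.  */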

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
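
/* Illustrative example of the shift cases above: for QImode x,

     (truncate:QI (lshiftrt:SI (zero_extend:SI x) (const_int 2)))

   becomes (lshiftrt:QI x (const_int 2)), since the bits shifted in
   from the extension are discarded by the truncation anyway.  */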

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
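
/* Illustrative example: converting SImode to SFmode is exact only when
   the value needs at most SFmode's 24 significand bits, so
   (float:SF (and:SI x (const_int 65535))) is exact because
   nonzero_bits limits the input to 16 bits.  */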

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
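
    /* Illustrative De Morgan example for the IOR/AND case above:

	 (not:SI (and:SI (not:SI x) y))

       rewrites to (ior:SI (not:SI y) x), with the remaining NOT placed
       first to match how the machine patterns are coded.  */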

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
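
    /* Worked example for the (neg (lt ...)) case above: with
       STORE_FLAG_VALUE == 1 and SImode x,

	 (neg:SI (lt:SI x (const_int 0)))

       becomes (ashiftrt:SI x (const_int 31)), i.e. 0 when x >= 0 and
       -1 when x < 0, matching the negated comparison result.  */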

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;
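
    /* Illustrative example of the widening-multiply canonicalization
       above: for HImode x and y,

	 (sign_extend:DI (mult:SI (sign_extend:SI x) (sign_extend:SI y)))

       becomes (mult:DI (sign_extend:DI x) (sign_extend:DI y)), which is
       safe because a 16x16 product needs at most 32 bits.  */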

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
		  (MULT, mode,
		   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
		   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  return 0;
}
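
/* Usage sketch (hypothetical operand): the constant-folding entry point
   below handles cases such as

     simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode)

   which returns (const_int -5), while non-constant operands make it
   return zero so that simplify_unary_operation falls back to the
   non-constant identities above.  */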
1670 /* Try to compute the value of a unary operation CODE whose output mode is to
1671 be MODE with input operand OP whose mode was originally OP_MODE.
1672 Return zero if the value cannot be computed. */
1674 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1675 rtx op, machine_mode op_mode)
1677 unsigned int width = GET_MODE_PRECISION (mode);
1679 if (code == VEC_DUPLICATE)
1681 gcc_assert (VECTOR_MODE_P (mode));
1682 if (GET_MODE (op) != VOIDmode)
1684 if (!VECTOR_MODE_P (GET_MODE (op)))
1685 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1686 else
1687 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1688 (GET_MODE (op)));
1690 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1691 || GET_CODE (op) == CONST_VECTOR)
1693 int elt_size = GET_MODE_UNIT_SIZE (mode);
1694 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1695 rtvec v = rtvec_alloc (n_elts);
1696 unsigned int i;
1698 if (GET_CODE (op) != CONST_VECTOR)
1699 for (i = 0; i < n_elts; i++)
1700 RTVEC_ELT (v, i) = op;
1701 else
1703 machine_mode inmode = GET_MODE (op);
1704 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1705 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1707 gcc_assert (in_n_elts < n_elts);
1708 gcc_assert ((n_elts % in_n_elts) == 0);
1709 for (i = 0; i < n_elts; i++)
1710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1712 return gen_rtx_CONST_VECTOR (mode, v);
1716 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1718 int elt_size = GET_MODE_UNIT_SIZE (mode);
1719 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1720 machine_mode opmode = GET_MODE (op);
1721 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1722 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1723 rtvec v = rtvec_alloc (n_elts);
1724 unsigned int i;
1726 gcc_assert (op_n_elts == n_elts);
1727 for (i = 0; i < n_elts; i++)
1729 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1730 CONST_VECTOR_ELT (op, i),
1731 GET_MODE_INNER (opmode));
1732 if (!x)
1733 return 0;
1734 RTVEC_ELT (v, i) = x;
1736 return gen_rtx_CONST_VECTOR (mode, v);
1739 /* The order of these tests is critical so that, for example, we don't
1740 check the wrong mode (input vs. output) for a conversion operation,
1741 such as FIX. At some point, this should be simplified. */
1743 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1745 REAL_VALUE_TYPE d;
1747 if (op_mode == VOIDmode)
1749 /* CONST_INT have VOIDmode as the mode. We assume that all
1750 the bits of the constant are significant, though, this is
1751 a dangerous assumption as many times CONST_INTs are
1752 created and used with garbage in the bits outside of the
1753 precision of the implied mode of the const_int. */
1754 op_mode = MAX_MODE_INT;
1757 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1759 /* Avoid the folding if flag_signaling_nans is on and
1760 operand is a signaling NaN. */
1761 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1762 return 0;
1764 d = real_value_truncate (mode, d);
1765 return const_double_from_real_value (d, mode);
1767 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1769 REAL_VALUE_TYPE d;
1771 if (op_mode == VOIDmode)
1773 /* CONST_INT have VOIDmode as the mode. We assume that all
1774 the bits of the constant are significant, though, this is
1775 a dangerous assumption as many times CONST_INTs are
1776 created and used with garbage in the bits outside of the
1777 precision of the implied mode of the const_int. */
1778 op_mode = MAX_MODE_INT;
1781 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1783 /* Avoid the folding if flag_signaling_nans is on and
1784 operand is a signaling NaN. */
1785 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1786 return 0;
1788 d = real_value_truncate (mode, d);
1789 return const_double_from_real_value (d, mode);
1792 if (CONST_SCALAR_INT_P (op) && width > 0)
1794 wide_int result;
1795 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1796 rtx_mode_t op0 = rtx_mode_t (op, imode);
1797 int int_value;
1799 #if TARGET_SUPPORTS_WIDE_INT == 0
1800 /* This assert keeps the simplification from producing a result
1801 that cannot be represented in a CONST_DOUBLE, but a lot of
1802 upstream callers expect that this function never fails to
1803 simplify something, so if you added this check to the test
1804 above, the code would just die later anyway.  If this assert
1805 fires, you need to make the port support wide int.  */
1806 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1807 #endif
1809 switch (code)
1811 case NOT:
1812 result = wi::bit_not (op0);
1813 break;
1815 case NEG:
1816 result = wi::neg (op0);
1817 break;
1819 case ABS:
1820 result = wi::abs (op0);
1821 break;
1823 case FFS:
1824 result = wi::shwi (wi::ffs (op0), mode);
1825 break;
1827 case CLZ:
1828 if (wi::ne_p (op0, 0))
1829 int_value = wi::clz (op0);
1830 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1831 int_value = GET_MODE_PRECISION (mode);
1832 result = wi::shwi (int_value, mode);
1833 break;
1835 case CLRSB:
1836 result = wi::shwi (wi::clrsb (op0), mode);
1837 break;
1839 case CTZ:
1840 if (wi::ne_p (op0, 0))
1841 int_value = wi::ctz (op0);
1842 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1843 int_value = GET_MODE_PRECISION (mode);
1844 result = wi::shwi (int_value, mode);
1845 break;
1847 case POPCOUNT:
1848 result = wi::shwi (wi::popcount (op0), mode);
1849 break;
1851 case PARITY:
1852 result = wi::shwi (wi::parity (op0), mode);
1853 break;
1855 case BSWAP:
1856 result = wide_int (op0).bswap ();
1857 break;
1859 case TRUNCATE:
1860 case ZERO_EXTEND:
1861 result = wide_int::from (op0, width, UNSIGNED);
1862 break;
1864 case SIGN_EXTEND:
1865 result = wide_int::from (op0, width, SIGNED);
1866 break;
1868 case SQRT:
1869 default:
1870 return 0;
1873 return immed_wide_int_const (result, mode);
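/* A couple of concrete instances of the folds above:
   (popcount:SI (const_int 15)) folds to (const_int 4), and
   (bswap:SI (const_int 0x12345678)) folds to (const_int 0x78563412).  */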
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (mode)
1878 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1880 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1881 switch (code)
1883 case SQRT:
1884 return 0;
1885 case ABS:
1886 d = real_value_abs (&d);
1887 break;
1888 case NEG:
1889 d = real_value_negate (&d);
1890 break;
1891 case FLOAT_TRUNCATE:
1892 /* Don't perform the operation if flag_signaling_nans is on
1893 and the operand is a signaling NaN. */
1894 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1895 return NULL_RTX;
1896 d = real_value_truncate (mode, d);
1897 break;
1898 case FLOAT_EXTEND:
1899 /* Don't perform the operation if flag_signaling_nans is on
1900 and the operand is a signaling NaN. */
1901 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1902 return NULL_RTX;
1903 /* All this does is change the mode, unless we are changing
1904 the mode class.  */
1905 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1906 real_convert (&d, mode, &d);
1907 break;
1908 case FIX:
1909 /* Don't perform the operation if flag_signaling_nans is on
1910 and the operand is a signaling NaN. */
1911 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1912 return NULL_RTX;
1913 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1914 break;
1915 case NOT:
1917 long tmp[4];
1918 int i;
1920 real_to_target (tmp, &d, GET_MODE (op));
1921 for (i = 0; i < 4; i++)
1922 tmp[i] = ~tmp[i];
1923 real_from_target (&d, tmp, mode);
1924 break;
1926 default:
1927 gcc_unreachable ();
1929 return const_double_from_real_value (d, mode);
1931 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1933 && GET_MODE_CLASS (mode) == MODE_INT
1934 && width > 0)
1936 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1937 operators are intentionally left unspecified (to ease implementation
1938 by target backends), for consistency, this routine implements the
1939 same semantics for constant folding as used by the middle-end. */
1941 /* This was formerly used only for non-IEEE float.
1942 eggert@twinsun.com says it is safe for IEEE also. */
1943 REAL_VALUE_TYPE t;
1944 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1945 wide_int wmax, wmin;
1946 /* The FAIL flag is part of the ABI of real_to_integer, but we
1947 rule out the failure cases before making this call.  */
1948 bool fail;
1950 switch (code)
1952 case FIX:
1953 if (REAL_VALUE_ISNAN (*x))
1954 return const0_rtx;
1956 /* Test against the signed upper bound. */
1957 wmax = wi::max_value (width, SIGNED);
1958 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1959 if (real_less (&t, x))
1960 return immed_wide_int_const (wmax, mode);
1962 /* Test against the signed lower bound. */
1963 wmin = wi::min_value (width, SIGNED);
1964 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1965 if (real_less (x, &t))
1966 return immed_wide_int_const (wmin, mode);
1968 return immed_wide_int_const (real_to_integer (x, &fail, width),
1969 mode);
1971 case UNSIGNED_FIX:
1972 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1973 return const0_rtx;
1975 /* Test against the unsigned upper bound. */
1976 wmax = wi::max_value (width, UNSIGNED);
1977 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1978 if (real_less (&t, x))
1979 return immed_wide_int_const (wmax, mode);
1981 return immed_wide_int_const (real_to_integer (x, &fail, width),
1982 mode);
1984 default:
1985 gcc_unreachable ();
1989 return NULL_RTX;
1992 /* Subroutine of simplify_binary_operation to simplify a binary operation
1993 CODE that can commute with byte swapping, with result mode MODE and
1994 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1995 Return zero if no simplification or canonicalization is possible. */
1997 static rtx
1998 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1999 rtx op0, rtx op1)
2001 rtx tem;
2003 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is the byte-swapped C1.  */
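/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), exposing the mask to
   further simplification against x.  */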
2004 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2006 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2007 simplify_gen_unary (BSWAP, mode, op1, mode));
2008 return simplify_gen_unary (BSWAP, mode, tem, mode);
2011 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2012 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2014 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2015 return simplify_gen_unary (BSWAP, mode, tem, mode);
2018 return NULL_RTX;
2021 /* Subroutine of simplify_binary_operation to simplify a commutative,
2022 associative binary operation CODE with result mode MODE, operating
2023 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2024 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2025 canonicalization is possible. */
2027 static rtx
2028 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2029 rtx op0, rtx op1)
2031 rtx tem;
2033 /* Linearize the operator to the left. */
2034 if (GET_CODE (op1) == code)
2036 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2037 if (GET_CODE (op0) == code)
2039 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2040 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2043 /* "a op (b op c)" becomes "(b op c) op a". */
2044 if (! swap_commutative_operands_p (op1, op0))
2045 return simplify_gen_binary (code, mode, op1, op0);
2047 std::swap (op0, op1);
2050 if (GET_CODE (op0) == code)
2052 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2053 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2055 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2056 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2059 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2060 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2061 if (tem != 0)
2062 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2064 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2065 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2066 if (tem != 0)
2067 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2070 return 0;
2074 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2075 and OP1. Return 0 if no simplification is possible.
2077 Don't use this for relational operations such as EQ or LT.
2078 Use simplify_relational_operation instead. */
2080 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2081 rtx op0, rtx op1)
2083 rtx trueop0, trueop1;
2084 rtx tem;
2086 /* Relational operations don't work here. We must know the mode
2087 of the operands in order to do the comparison correctly.
2088 Assuming a full word can give incorrect results.
2089 Consider comparing 128 with -128 in QImode. */
2090 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2091 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2093 /* Make sure the constant is second. */
2094 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2095 && swap_commutative_operands_p (op0, op1))
2096 std::swap (op0, op1);
2098 trueop0 = avoid_constant_pool_reference (op0);
2099 trueop1 = avoid_constant_pool_reference (op1);
2101 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2102 if (tem)
2103 return tem;
2104 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2106 if (tem)
2107 return tem;
2109 /* If the above steps did not result in a simplification and op0 or op1
2110 were constant pool references, use the referenced constants directly. */
2111 if (trueop0 != op0 || trueop1 != op1)
2112 return simplify_gen_binary (code, mode, trueop0, trueop1);
2114 return NULL_RTX;
2117 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2118 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2119 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2120 actual constants. */
2122 static rtx
2123 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2124 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2126 rtx tem, reversed, opleft, opright;
2127 HOST_WIDE_INT val;
2128 unsigned int width = GET_MODE_PRECISION (mode);
2130 /* Even if we can't compute a constant result,
2131 there are some cases worth simplifying. */
2133 switch (code)
2135 case PLUS:
2136 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2137 when x is NaN, infinite, or finite and nonzero. They aren't
2138 when x is -0 and the rounding mode is not towards -infinity,
2139 since (-0) + 0 is then 0. */
2140 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2141 return op0;
2143 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2144 transformations are safe even for IEEE. */
2145 if (GET_CODE (op0) == NEG)
2146 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2147 else if (GET_CODE (op1) == NEG)
2148 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2150 /* (~a) + 1 -> -a */
2151 if (INTEGRAL_MODE_P (mode)
2152 && GET_CODE (op0) == NOT
2153 && trueop1 == const1_rtx)
2154 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2156 /* Handle both-operands-constant cases. We can only add
2157 CONST_INTs to constants since the sum of relocatable symbols
2158 can't be handled by most assemblers. Don't add CONST_INT
2159 to CONST_INT since overflow won't be computed properly if wider
2160 than HOST_BITS_PER_WIDE_INT. */
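/* E.g. (plus (symbol_ref "s") (const_int 8)) becomes the relocatable
   constant (const (plus (symbol_ref "s") (const_int 8))), a
   symbol-plus-offset form that assemblers do handle.  */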
2162 if ((GET_CODE (op0) == CONST
2163 || GET_CODE (op0) == SYMBOL_REF
2164 || GET_CODE (op0) == LABEL_REF)
2165 && CONST_INT_P (op1))
2166 return plus_constant (mode, op0, INTVAL (op1));
2167 else if ((GET_CODE (op1) == CONST
2168 || GET_CODE (op1) == SYMBOL_REF
2169 || GET_CODE (op1) == LABEL_REF)
2170 && CONST_INT_P (op0))
2171 return plus_constant (mode, op1, INTVAL (op0));
2173 /* See if this is something like X * C - X or vice versa or
2174 if the multiplication is written as a shift. If so, we can
2175 distribute and make a new multiply, shift, or maybe just
2176 have X (if C is 2 in the example above). But don't make
2177 something more expensive than we had before. */
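/* For instance, (plus (mult x 3) x) distributes to (mult x 4), and
   (plus (ashift x 2) x), i.e. x*4 + x, distributes to (mult x 5);
   either result is kept only if it costs no more than the original.  */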
2179 if (SCALAR_INT_MODE_P (mode))
2181 rtx lhs = op0, rhs = op1;
2183 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2184 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2186 if (GET_CODE (lhs) == NEG)
2188 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2189 lhs = XEXP (lhs, 0);
2191 else if (GET_CODE (lhs) == MULT
2192 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2194 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2195 lhs = XEXP (lhs, 0);
2197 else if (GET_CODE (lhs) == ASHIFT
2198 && CONST_INT_P (XEXP (lhs, 1))
2199 && INTVAL (XEXP (lhs, 1)) >= 0
2200 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2202 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2203 GET_MODE_PRECISION (mode));
2204 lhs = XEXP (lhs, 0);
2207 if (GET_CODE (rhs) == NEG)
2209 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2210 rhs = XEXP (rhs, 0);
2212 else if (GET_CODE (rhs) == MULT
2213 && CONST_INT_P (XEXP (rhs, 1)))
2215 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2216 rhs = XEXP (rhs, 0);
2218 else if (GET_CODE (rhs) == ASHIFT
2219 && CONST_INT_P (XEXP (rhs, 1))
2220 && INTVAL (XEXP (rhs, 1)) >= 0
2221 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2223 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2224 GET_MODE_PRECISION (mode));
2225 rhs = XEXP (rhs, 0);
2228 if (rtx_equal_p (lhs, rhs))
2230 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2231 rtx coeff;
2232 bool speed = optimize_function_for_speed_p (cfun);
2234 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2236 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2237 return (set_src_cost (tem, mode, speed)
2238 <= set_src_cost (orig, mode, speed) ? tem : 0);
2242 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2243 if (CONST_SCALAR_INT_P (op1)
2244 && GET_CODE (op0) == XOR
2245 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2246 && mode_signbit_p (mode, op1))
2247 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2248 simplify_gen_binary (XOR, mode, op1,
2249 XEXP (op0, 1)));
2251 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2252 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2253 && GET_CODE (op0) == MULT
2254 && GET_CODE (XEXP (op0, 0)) == NEG)
2256 rtx in1, in2;
2258 in1 = XEXP (XEXP (op0, 0), 0);
2259 in2 = XEXP (op0, 1);
2260 return simplify_gen_binary (MINUS, mode, op1,
2261 simplify_gen_binary (MULT, mode,
2262 in1, in2));
2265 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2266 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2267 is 1. */
2268 if (COMPARISON_P (op0)
2269 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2270 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2271 && (reversed = reversed_comparison (op0, mode)))
2272 return
2273 simplify_gen_unary (NEG, mode, reversed, mode);
2275 /* If one of the operands is a PLUS or a MINUS, see if we can
2276 simplify this by the associative law.
2277 Don't use the associative law for floating point.
2278 The inaccuracy makes it nonassociative,
2279 and subtle programs can break if operations are associated. */
2281 if (INTEGRAL_MODE_P (mode)
2282 && (plus_minus_operand_p (op0)
2283 || plus_minus_operand_p (op1))
2284 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2285 return tem;
2287 /* Reassociate floating point addition only when the user
2288 specifies associative math operations. */
2289 if (FLOAT_MODE_P (mode)
2290 && flag_associative_math)
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (tem)
2294 return tem;
2296 break;
2298 case COMPARE:
2299 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2300 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2301 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2302 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2304 rtx xop00 = XEXP (op0, 0);
2305 rtx xop10 = XEXP (op1, 0);
2307 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2308 return xop00;
2310 if (REG_P (xop00) && REG_P (xop10)
2311 && REGNO (xop00) == REGNO (xop10)
2312 && GET_MODE (xop00) == mode
2313 && GET_MODE (xop10) == mode
2314 && GET_MODE_CLASS (mode) == MODE_CC)
2315 return xop00;
2317 break;
2319 case MINUS:
2320 /* We can't assume x-x is 0 even with non-IEEE floating point,
2321 but since it is zero except in very strange circumstances, we
2322 will treat it as zero with -ffinite-math-only. */
2323 if (rtx_equal_p (trueop0, trueop1)
2324 && ! side_effects_p (op0)
2325 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2326 return CONST0_RTX (mode);
2328 /* Change subtraction from zero into negation. (0 - x) is the
2329 same as -x when x is NaN, infinite, or finite and nonzero.
2330 But if the mode has signed zeros, and does not round towards
2331 -infinity, then 0 - 0 is 0, not -0. */
2332 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2333 return simplify_gen_unary (NEG, mode, op1, mode);
2335 /* (-1 - a) is ~a, unless the expression contains symbolic
2336 constants, in which case not retaining additions and
2337 subtractions could cause invalid assembly to be produced. */
2338 if (trueop0 == constm1_rtx
2339 && !contains_symbolic_reference_p (op1))
2340 return simplify_gen_unary (NOT, mode, op1, mode);
2342 /* Subtracting 0 has no effect unless the mode has signed zeros
2343 and supports rounding towards -infinity. In such a case,
2344 0 - 0 is -0. */
2345 if (!(HONOR_SIGNED_ZEROS (mode)
2346 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2347 && trueop1 == CONST0_RTX (mode))
2348 return op0;
2350 /* See if this is something like X * C - X or vice versa or
2351 if the multiplication is written as a shift. If so, we can
2352 distribute and make a new multiply, shift, or maybe just
2353 have X (if C is 2 in the example above). But don't make
2354 something more expensive than we had before. */
2356 if (SCALAR_INT_MODE_P (mode))
2358 rtx lhs = op0, rhs = op1;
2360 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2361 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2363 if (GET_CODE (lhs) == NEG)
2365 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2366 lhs = XEXP (lhs, 0);
2368 else if (GET_CODE (lhs) == MULT
2369 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2371 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2372 lhs = XEXP (lhs, 0);
2374 else if (GET_CODE (lhs) == ASHIFT
2375 && CONST_INT_P (XEXP (lhs, 1))
2376 && INTVAL (XEXP (lhs, 1)) >= 0
2377 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2379 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2380 GET_MODE_PRECISION (mode));
2381 lhs = XEXP (lhs, 0);
2384 if (GET_CODE (rhs) == NEG)
2386 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2387 rhs = XEXP (rhs, 0);
2389 else if (GET_CODE (rhs) == MULT
2390 && CONST_INT_P (XEXP (rhs, 1)))
2392 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2393 rhs = XEXP (rhs, 0);
2395 else if (GET_CODE (rhs) == ASHIFT
2396 && CONST_INT_P (XEXP (rhs, 1))
2397 && INTVAL (XEXP (rhs, 1)) >= 0
2398 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2400 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2401 GET_MODE_PRECISION (mode));
2402 negcoeff1 = -negcoeff1;
2403 rhs = XEXP (rhs, 0);
2406 if (rtx_equal_p (lhs, rhs))
2408 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2409 rtx coeff;
2410 bool speed = optimize_function_for_speed_p (cfun);
2412 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2414 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2415 return (set_src_cost (tem, mode, speed)
2416 <= set_src_cost (orig, mode, speed) ? tem : 0);
2420 /* (a - (-b)) -> (a + b). True even for IEEE. */
2421 if (GET_CODE (op1) == NEG)
2422 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2424 /* (-x - c) may be simplified as (-c - x). */
2425 if (GET_CODE (op0) == NEG
2426 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2428 tem = simplify_unary_operation (NEG, mode, op1, mode);
2429 if (tem)
2430 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2433 /* Don't let a relocatable value get a negative coeff. */
2434 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2435 return simplify_gen_binary (PLUS, mode,
2436 op0,
2437 neg_const_int (mode, op1));
2439 /* (x - (x & y)) -> (x & ~y) */
2440 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2442 if (rtx_equal_p (op0, XEXP (op1, 0)))
2444 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2445 GET_MODE (XEXP (op1, 1)));
2446 return simplify_gen_binary (AND, mode, op0, tem);
2448 if (rtx_equal_p (op0, XEXP (op1, 1)))
2450 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2451 GET_MODE (XEXP (op1, 0)));
2452 return simplify_gen_binary (AND, mode, op0, tem);
2456 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2457 by reversing the comparison code if valid. */
2458 if (STORE_FLAG_VALUE == 1
2459 && trueop0 == const1_rtx
2460 && COMPARISON_P (op1)
2461 && (reversed = reversed_comparison (op1, mode)))
2462 return reversed;
2464 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2465 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2466 && GET_CODE (op1) == MULT
2467 && GET_CODE (XEXP (op1, 0)) == NEG)
2469 rtx in1, in2;
2471 in1 = XEXP (XEXP (op1, 0), 0);
2472 in2 = XEXP (op1, 1);
2473 return simplify_gen_binary (PLUS, mode,
2474 simplify_gen_binary (MULT, mode,
2475 in1, in2),
2476 op0);
2479 /* Canonicalize (minus (neg A) (mult B C)) to
2480 (minus (mult (neg B) C) A). */
2481 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2482 && GET_CODE (op1) == MULT
2483 && GET_CODE (op0) == NEG)
2485 rtx in1, in2;
2487 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2488 in2 = XEXP (op1, 1);
2489 return simplify_gen_binary (MINUS, mode,
2490 simplify_gen_binary (MULT, mode,
2491 in1, in2),
2492 XEXP (op0, 0));
2495 /* If one of the operands is a PLUS or a MINUS, see if we can
2496 simplify this by the associative law. This will, for example,
2497 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2498 Don't use the associative law for floating point.
2499 The inaccuracy makes it nonassociative,
2500 and subtle programs can break if operations are associated. */
2502 if (INTEGRAL_MODE_P (mode)
2503 && (plus_minus_operand_p (op0)
2504 || plus_minus_operand_p (op1))
2505 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2506 return tem;
2507 break;
2509 case MULT:
2510 if (trueop1 == constm1_rtx)
2511 return simplify_gen_unary (NEG, mode, op0, mode);
2513 if (GET_CODE (op0) == NEG)
2515 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2516 /* If op1 is a MULT as well and simplify_unary_operation
2517 just moved the NEG to the second operand, simplify_gen_binary
2518 below could, through simplify_associative_operation, move
2519 the NEG around again and recurse endlessly. */
2520 if (temp
2521 && GET_CODE (op1) == MULT
2522 && GET_CODE (temp) == MULT
2523 && XEXP (op1, 0) == XEXP (temp, 0)
2524 && GET_CODE (XEXP (temp, 1)) == NEG
2525 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2526 temp = NULL_RTX;
2527 if (temp)
2528 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2530 if (GET_CODE (op1) == NEG)
2532 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2533 /* If op0 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could, through simplify_associative_operation, move
2536 the NEG around again and recurse endlessly. */
2537 if (temp
2538 && GET_CODE (op0) == MULT
2539 && GET_CODE (temp) == MULT
2540 && XEXP (op0, 0) == XEXP (temp, 0)
2541 && GET_CODE (XEXP (temp, 1)) == NEG
2542 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2543 temp = NULL_RTX;
2544 if (temp)
2545 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2548 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2549 x is NaN, since x * 0 is then also NaN. Nor is it valid
2550 when the mode has signed zeros, since multiplying a negative
2551 number by 0 will give -0, not 0. */
2552 if (!HONOR_NANS (mode)
2553 && !HONOR_SIGNED_ZEROS (mode)
2554 && trueop1 == CONST0_RTX (mode)
2555 && ! side_effects_p (op0))
2556 return op1;
2558 /* In IEEE floating point, x*1 is not equivalent to x for
2559 signalling NaNs. */
2560 if (!HONOR_SNANS (mode)
2561 && trueop1 == CONST1_RTX (mode))
2562 return op0;
2564 /* Convert multiply by constant power of two into shift. */
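/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)).  */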
2565 if (CONST_SCALAR_INT_P (trueop1))
2567 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2568 if (val >= 0)
2569 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2572 /* x*2 is x+x and x*(-1) is -x */
2573 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2574 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2575 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2576 && GET_MODE (op0) == mode)
2578 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2580 if (real_equal (d1, &dconst2))
2581 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2583 if (!HONOR_SNANS (mode)
2584 && real_equal (d1, &dconstm1))
2585 return simplify_gen_unary (NEG, mode, op0, mode);
2588 /* Optimize -x * -x as x * x. */
2589 if (FLOAT_MODE_P (mode)
2590 && GET_CODE (op0) == NEG
2591 && GET_CODE (op1) == NEG
2592 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2593 && !side_effects_p (XEXP (op0, 0)))
2594 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2596 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2597 if (SCALAR_FLOAT_MODE_P (mode)
2598 && GET_CODE (op0) == ABS
2599 && GET_CODE (op1) == ABS
2600 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2601 && !side_effects_p (XEXP (op0, 0)))
2602 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2604 /* Reassociate multiplication, but for floating point MULTs
2605 only when the user specifies unsafe math optimizations. */
2606 if (! FLOAT_MODE_P (mode)
2607 || flag_unsafe_math_optimizations)
2609 tem = simplify_associative_operation (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2613 break;
2615 case IOR:
2616 if (trueop1 == CONST0_RTX (mode))
2617 return op0;
2618 if (INTEGRAL_MODE_P (mode)
2619 && trueop1 == CONSTM1_RTX (mode)
2620 && !side_effects_p (op0))
2621 return op1;
2622 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2623 return op0;
2624 /* A | (~A) -> -1 */
2625 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2626 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2627 && ! side_effects_p (op0)
2628 && SCALAR_INT_MODE_P (mode))
2629 return constm1_rtx;
2631 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2632 if (CONST_INT_P (op1)
2633 && HWI_COMPUTABLE_MODE_P (mode)
2634 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2635 && !side_effects_p (op0))
2636 return op1;
2638 /* Canonicalize (X & C1) | C2. */
2639 if (GET_CODE (op0) == AND
2640 && CONST_INT_P (trueop1)
2641 && CONST_INT_P (XEXP (op0, 1)))
2643 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2644 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2645 HOST_WIDE_INT c2 = INTVAL (trueop1);
2647 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2648 if ((c1 & c2) == c1
2649 && !side_effects_p (XEXP (op0, 0)))
2650 return trueop1;
2652 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2653 if (((c1|c2) & mask) == mask)
2654 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2656 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2657 if (((c1 & ~c2) & mask) != (c1 & mask))
2659 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2660 gen_int_mode (c1 & ~c2, mode));
2661 return simplify_gen_binary (IOR, mode, tem, op1);
2665 /* Convert (A & B) | A to A. */
2666 if (GET_CODE (op0) == AND
2667 && (rtx_equal_p (XEXP (op0, 0), op1)
2668 || rtx_equal_p (XEXP (op0, 1), op1))
2669 && ! side_effects_p (XEXP (op0, 0))
2670 && ! side_effects_p (XEXP (op0, 1)))
2671 return op1;
2673 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2674 mode size to (rotate A CX). */
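/* For example, in SImode (ior (ashift x (const_int 8))
   (lshiftrt x (const_int 24))) is recognized as (rotate x (const_int 8)),
   since 8 + 24 equals the 32-bit mode precision.  */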
2676 if (GET_CODE (op1) == ASHIFT
2677 || GET_CODE (op1) == SUBREG)
2679 opleft = op1;
2680 opright = op0;
2682 else
2684 opright = op1;
2685 opleft = op0;
2688 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2689 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2690 && CONST_INT_P (XEXP (opleft, 1))
2691 && CONST_INT_P (XEXP (opright, 1))
2692 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2693 == GET_MODE_PRECISION (mode)))
2694 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2696 /* Same, but for ashift that has been "simplified" to a wider mode
2697 by simplify_shift_const. */
2699 if (GET_CODE (opleft) == SUBREG
2700 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2701 && GET_CODE (opright) == LSHIFTRT
2702 && GET_CODE (XEXP (opright, 0)) == SUBREG
2703 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2704 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2705 && (GET_MODE_SIZE (GET_MODE (opleft))
2706 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2707 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2708 SUBREG_REG (XEXP (opright, 0)))
2709 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2710 && CONST_INT_P (XEXP (opright, 1))
2711 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2712 == GET_MODE_PRECISION (mode)))
2713 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2714 XEXP (SUBREG_REG (opleft), 1));
2716 /* If we have (ior (and X C1) C2), simplify this by making
2717 C1 as small as possible if C1 actually changes.  */
2718 if (CONST_INT_P (op1)
2719 && (HWI_COMPUTABLE_MODE_P (mode)
2720 || INTVAL (op1) > 0)
2721 && GET_CODE (op0) == AND
2722 && CONST_INT_P (XEXP (op0, 1))
2723 && CONST_INT_P (op1)
2724 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2726 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2727 gen_int_mode (UINTVAL (XEXP (op0, 1))
2728 & ~UINTVAL (op1),
2729 mode));
2730 return simplify_gen_binary (IOR, mode, tmp, op1);
2733 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2734 a (sign_extend (plus ...)).  If so, check whether OP1 is a
2735 CONST_INT whose bits the PLUS does not affect: in that case we
2736 can do the IOR as a PLUS and we can associate.  This is valid
2737 if OP1 can be safely shifted left C bits.  */
2738 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2739 && GET_CODE (XEXP (op0, 0)) == PLUS
2740 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2741 && CONST_INT_P (XEXP (op0, 1))
2742 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2744 int count = INTVAL (XEXP (op0, 1));
2745 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2747 if (mask >> count == INTVAL (trueop1)
2748 && trunc_int_for_mode (mask, mode) == mask
2749 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2750 return simplify_gen_binary (ASHIFTRT, mode,
2751 plus_constant (mode, XEXP (op0, 0),
2752 mask),
2753 XEXP (op0, 1));
2756 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2757 if (tem)
2758 return tem;
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
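/* E.g. in QImode the sign bit is 0x80, represented as (const_int -128),
   so (xor x (const_int -128)) becomes (plus x (const_int -128)); the
   two agree because adding the sign bit flips just that bit, the
   carry out being discarded.  */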
2776 if (CONST_SCALAR_INT_P (op1)
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if (CONST_SCALAR_INT_P (op1)
2781 && GET_CODE (op0) == PLUS
2782 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2783 && mode_signbit_p (mode, XEXP (op0, 1)))
2784 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2785 simplify_gen_binary (XOR, mode, op1,
2786 XEXP (op0, 1)));
2788 /* If we are XORing two things that have no bits in common,
2789 convert them into an IOR. This helps to detect rotation encoded
2790 using those methods and possibly other simplifications. */
2792 if (HWI_COMPUTABLE_MODE_P (mode)
2793 && (nonzero_bits (op0, mode)
2794 & nonzero_bits (op1, mode)) == 0)
2795 return (simplify_gen_binary (IOR, mode, op0, op1));
2797 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2798 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2799 (NOT y). */
2801 int num_negated = 0;
2803 if (GET_CODE (op0) == NOT)
2804 num_negated++, op0 = XEXP (op0, 0);
2805 if (GET_CODE (op1) == NOT)
2806 num_negated++, op1 = XEXP (op1, 0);
2808 if (num_negated == 2)
2809 return simplify_gen_binary (XOR, mode, op0, op1);
2810 else if (num_negated == 1)
2811 return simplify_gen_unary (NOT, mode,
2812 simplify_gen_binary (XOR, mode, op0, op1),
2813 mode);
2816 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2817 correspond to a machine insn or result in further simplifications
2818 if B is a constant. */
2820 if (GET_CODE (op0) == AND
2821 && rtx_equal_p (XEXP (op0, 1), op1)
2822 && ! side_effects_p (op1))
2823 return simplify_gen_binary (AND, mode,
2824 simplify_gen_unary (NOT, mode,
2825 XEXP (op0, 0), mode),
2826 op1);
2828 else if (GET_CODE (op0) == AND
2829 && rtx_equal_p (XEXP (op0, 0), op1)
2830 && ! side_effects_p (op1))
2831 return simplify_gen_binary (AND, mode,
2832 simplify_gen_unary (NOT, mode,
2833 XEXP (op0, 1), mode),
2834 op1);
2836 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2837 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2838 out bits inverted twice and not set by C. Similarly, given
2839 (xor (and (xor A B) C) D), simplify without inverting C in
2840 the xor operand: (xor (and A C) (B&C)^D).  */
2842 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2843 && GET_CODE (XEXP (op0, 0)) == XOR
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2848 enum rtx_code op = GET_CODE (op0);
2849 rtx a = XEXP (XEXP (op0, 0), 0);
2850 rtx b = XEXP (XEXP (op0, 0), 1);
2851 rtx c = XEXP (op0, 1);
2852 rtx d = op1;
2853 HOST_WIDE_INT bval = INTVAL (b);
2854 HOST_WIDE_INT cval = INTVAL (c);
2855 HOST_WIDE_INT dval = INTVAL (d);
2856 HOST_WIDE_INT xcval;
2858 if (op == IOR)
2859 xcval = ~cval;
2860 else
2861 xcval = cval;
2863 return simplify_gen_binary (XOR, mode,
2864 simplify_gen_binary (op, mode, a, c),
2865 gen_int_mode ((bval & xcval) ^ dval,
2866 mode));
2869 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2870 we can transform like this:
2871 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2872 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2873 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2874 Attempt a few simplifications when B and C are both constants. */
2875 if (GET_CODE (op0) == AND
2876 && CONST_INT_P (op1)
2877 && CONST_INT_P (XEXP (op0, 1)))
2879 rtx a = XEXP (op0, 0);
2880 rtx b = XEXP (op0, 1);
2881 rtx c = op1;
2882 HOST_WIDE_INT bval = INTVAL (b);
2883 HOST_WIDE_INT cval = INTVAL (c);
2885 /* Instead of computing ~A&C, we compute its negated value,
2886 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2887 optimize for sure. If it does not simplify, we still try
2888 to compute ~A&C below, but since that always allocates
2889 RTL, we don't try that before committing to returning a
2890 simplified expression. */
2891 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2892 GEN_INT (~cval));
2894 if ((~cval & bval) == 0)
2896 rtx na_c = NULL_RTX;
2897 if (n_na_c)
2898 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2899 else
2901 /* If ~A does not simplify, don't bother: we don't
2902 want to simplify 2 operations into 3, and if na_c
2903 were to simplify with na, n_na_c would have
2904 simplified as well. */
2905 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2906 if (na)
2907 na_c = simplify_gen_binary (AND, mode, na, c);
2910 /* Try to simplify ~A&C | ~B&C. */
2911 if (na_c != NULL_RTX)
2912 return simplify_gen_binary (IOR, mode, na_c,
2913 gen_int_mode (~bval & cval, mode));
2915 else
2917 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2918 if (n_na_c == CONSTM1_RTX (mode))
2920 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2921 gen_int_mode (~cval & bval,
2922 mode));
2923 return simplify_gen_binary (IOR, mode, a_nc_b,
2924 gen_int_mode (~bval & cval,
2925 mode));
2930 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2931 do (ior (and A ~C) (and B C)), which on some machines is a single
2932 instruction and also has a shorter instruction path length.  */
2933 if (GET_CODE (op0) == AND
2934 && GET_CODE (XEXP (op0, 0)) == XOR
2935 && CONST_INT_P (XEXP (op0, 1))
2936 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2938 rtx a = trueop1;
2939 rtx b = XEXP (XEXP (op0, 0), 1);
2940 rtx c = XEXP (op0, 1);
2941 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2942 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2943 rtx bc = simplify_gen_binary (AND, mode, b, c);
2944 return simplify_gen_binary (IOR, mode, a_nc, bc);
2946 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2947 else if (GET_CODE (op0) == AND
2948 && GET_CODE (XEXP (op0, 0)) == XOR
2949 && CONST_INT_P (XEXP (op0, 1))
2950 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = trueop1;
2954 rtx c = XEXP (op0, 1);
2955 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2956 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2957 rtx ac = simplify_gen_binary (AND, mode, a, c);
2958 return simplify_gen_binary (IOR, mode, ac, b_nc);
2961 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2962 comparison if STORE_FLAG_VALUE is 1. */
2963 if (STORE_FLAG_VALUE == 1
2964 && trueop1 == const1_rtx
2965 && COMPARISON_P (op0)
2966 && (reversed = reversed_comparison (op0, mode)))
2967 return reversed;
2969 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2970 is (lt foo (const_int 0)), so we can perform the above
2971 simplification if STORE_FLAG_VALUE is 1. */
2973 if (STORE_FLAG_VALUE == 1
2974 && trueop1 == const1_rtx
2975 && GET_CODE (op0) == LSHIFTRT
2976 && CONST_INT_P (XEXP (op0, 1))
2977 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2978 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2980 /* (xor (comparison foo bar) (const_int sign-bit))
2981 when STORE_FLAG_VALUE is the sign bit. */
2982 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2983 && trueop1 == const_true_rtx
2984 && COMPARISON_P (op0)
2985 && (reversed = reversed_comparison (op0, mode)))
2986 return reversed;
2988 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2989 if (tem)
2990 return tem;
2992 tem = simplify_associative_operation (code, mode, op0, op1);
2993 if (tem)
2994 return tem;
2995 break;
2997 case AND:
2998 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2999 return trueop1;
3000 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3001 return op0;
3002 if (HWI_COMPUTABLE_MODE_P (mode))
3004 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3005 HOST_WIDE_INT nzop1;
3006 if (CONST_INT_P (trueop1))
3008 HOST_WIDE_INT val1 = INTVAL (trueop1);
3009 /* If we are turning off bits already known off in OP0, we need
3010 not do an AND. */
3011 if ((nzop0 & ~val1) == 0)
3012 return op0;
3014 nzop1 = nonzero_bits (trueop1, mode);
3015 /* If we are clearing all the nonzero bits, the result is zero. */
3016 if ((nzop1 & nzop0) == 0
3017 && !side_effects_p (op0) && !side_effects_p (op1))
3018 return CONST0_RTX (mode);
3020 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3021 && GET_MODE_CLASS (mode) != MODE_CC)
3022 return op0;
3023 /* A & (~A) -> 0 */
3024 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3025 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3026 && ! side_effects_p (op0)
3027 && GET_MODE_CLASS (mode) != MODE_CC)
3028 return CONST0_RTX (mode);
3030 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3031 there are no nonzero bits of C outside of X's mode. */
3032 if ((GET_CODE (op0) == SIGN_EXTEND
3033 || GET_CODE (op0) == ZERO_EXTEND)
3034 && CONST_INT_P (trueop1)
3035 && HWI_COMPUTABLE_MODE_P (mode)
3036 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3037 & UINTVAL (trueop1)) == 0)
3039 machine_mode imode = GET_MODE (XEXP (op0, 0));
3040 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3041 gen_int_mode (INTVAL (trueop1),
3042 imode));
3043 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3046 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3047 we might be able to further simplify the AND with X and potentially
3048 remove the truncation altogether. */
3049 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3051 rtx x = XEXP (op0, 0);
3052 machine_mode xmode = GET_MODE (x);
3053 tem = simplify_gen_binary (AND, xmode, x,
3054 gen_int_mode (INTVAL (trueop1), xmode));
3055 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3058 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3059 if (GET_CODE (op0) == IOR
3060 && CONST_INT_P (trueop1)
3061 && CONST_INT_P (XEXP (op0, 1)))
3063 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3064 return simplify_gen_binary (IOR, mode,
3065 simplify_gen_binary (AND, mode,
3066 XEXP (op0, 0), op1),
3067 gen_int_mode (tmp, mode));
3070 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3071 insn (and may simplify more). */
3072 if (GET_CODE (op0) == XOR
3073 && rtx_equal_p (XEXP (op0, 0), op1)
3074 && ! side_effects_p (op1))
3075 return simplify_gen_binary (AND, mode,
3076 simplify_gen_unary (NOT, mode,
3077 XEXP (op0, 1), mode),
3078 op1);
3080 if (GET_CODE (op0) == XOR
3081 && rtx_equal_p (XEXP (op0, 1), op1)
3082 && ! side_effects_p (op1))
3083 return simplify_gen_binary (AND, mode,
3084 simplify_gen_unary (NOT, mode,
3085 XEXP (op0, 0), mode),
3086 op1);
3088 /* Similarly for (~(A ^ B)) & A. */
3089 if (GET_CODE (op0) == NOT
3090 && GET_CODE (XEXP (op0, 0)) == XOR
3091 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3092 && ! side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3095 if (GET_CODE (op0) == NOT
3096 && GET_CODE (XEXP (op0, 0)) == XOR
3097 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3098 && ! side_effects_p (op1))
3099 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3101 /* Convert (A | B) & A to A. */
3102 if (GET_CODE (op0) == IOR
3103 && (rtx_equal_p (XEXP (op0, 0), op1)
3104 || rtx_equal_p (XEXP (op0, 1), op1))
3105 && ! side_effects_p (XEXP (op0, 0))
3106 && ! side_effects_p (XEXP (op0, 1)))
3107 return op1;
3109 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3110 ((A & N) + B) & M -> (A + B) & M
3111 Similarly if (N & M) == 0,
3112 ((A | N) + B) & M -> (A + B) & M
3113 and for - instead of + and/or ^ instead of |.
3114 Also, if (N & M) == 0, then
3115 (A +- N) & M -> A & M. */
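/* As an example of the last rule: with N = 0x10 and M = 0xf we have
   (N & M) == 0, so (and (plus x (const_int 0x10)) (const_int 0xf))
   simplifies to (and x (const_int 0xf)), because the addend cannot
   affect the low four bits.  */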
3116 if (CONST_INT_P (trueop1)
3117 && HWI_COMPUTABLE_MODE_P (mode)
3118 && ~UINTVAL (trueop1)
3119 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3120 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3122 rtx pmop[2];
3123 int which;
3125 pmop[0] = XEXP (op0, 0);
3126 pmop[1] = XEXP (op0, 1);
3128 if (CONST_INT_P (pmop[1])
3129 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3130 return simplify_gen_binary (AND, mode, pmop[0], op1);
3132 for (which = 0; which < 2; which++)
3134 tem = pmop[which];
3135 switch (GET_CODE (tem))
3137 case AND:
3138 if (CONST_INT_P (XEXP (tem, 1))
3139 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3140 == UINTVAL (trueop1))
3141 pmop[which] = XEXP (tem, 0);
3142 break;
3143 case IOR:
3144 case XOR:
3145 if (CONST_INT_P (XEXP (tem, 1))
3146 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3147 pmop[which] = XEXP (tem, 0);
3148 break;
3149 default:
3150 break;
3154 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3156 tem = simplify_gen_binary (GET_CODE (op0), mode,
3157 pmop[0], pmop[1]);
3158 return simplify_gen_binary (code, mode, tem, op1);
3162 /* (and X (ior (not X) Y)) -> (and X Y) */
3163 if (GET_CODE (op1) == IOR
3164 && GET_CODE (XEXP (op1, 0)) == NOT
3165 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3166 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3168 /* (and (ior (not X) Y) X) -> (and X Y) */
3169 if (GET_CODE (op0) == IOR
3170 && GET_CODE (XEXP (op0, 0)) == NOT
3171 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3172 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3174 /* (and X (ior Y (not X))) -> (and X Y) */
3175 if (GET_CODE (op1) == IOR
3176 && GET_CODE (XEXP (op1, 1)) == NOT
3177 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3178 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3180 /* (and (ior Y (not X)) X) -> (and X Y) */
3181 if (GET_CODE (op0) == IOR
3182 && GET_CODE (XEXP (op0, 1)) == NOT
3183 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3184 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3186 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3187 if (tem)
3188 return tem;
3190 tem = simplify_associative_operation (code, mode, op0, op1);
3191 if (tem)
3192 return tem;
3193 break;
3195 case UDIV:
3196 /* 0/x is 0 (or x&0 if x has side-effects). */
3197 if (trueop0 == CONST0_RTX (mode))
3199 if (side_effects_p (op1))
3200 return simplify_gen_binary (AND, mode, op1, trueop0);
3201 return trueop0;
3203 /* x/1 is x. */
3204 if (trueop1 == CONST1_RTX (mode))
3206 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3207 if (tem)
3208 return tem;
3210 /* Convert divide by power of two into shift. */
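/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)).  */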
3211 if (CONST_INT_P (trueop1)
3212 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3213 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3214 break;
3216 case DIV:
3217 /* Handle floating point and integers separately. */
3218 if (SCALAR_FLOAT_MODE_P (mode))
3220 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3221 safe for modes with NaNs, since 0.0 / 0.0 will then be
3222 NaN rather than 0.0. Nor is it safe for modes with signed
3223 zeros, since dividing 0 by a negative number gives -0.0.  */
3224 if (trueop0 == CONST0_RTX (mode)
3225 && !HONOR_NANS (mode)
3226 && !HONOR_SIGNED_ZEROS (mode)
3227 && ! side_effects_p (op1))
3228 return op0;
3229 /* x/1.0 is x. */
3230 if (trueop1 == CONST1_RTX (mode)
3231 && !HONOR_SNANS (mode))
3232 return op0;
3234 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3235 && trueop1 != CONST0_RTX (mode))
3237 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3239 /* x/-1.0 is -x. */
3240 if (real_equal (d1, &dconstm1)
3241 && !HONOR_SNANS (mode))
3242 return simplify_gen_unary (NEG, mode, op0, mode);
3244 /* Change FP division by a constant into multiplication.
3245 Only do this with -freciprocal-math. */
3246 if (flag_reciprocal_math
3247 && !real_equal (d1, &dconst0))
3249 REAL_VALUE_TYPE d;
3250 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3251 tem = const_double_from_real_value (d, mode);
3252 return simplify_gen_binary (MULT, mode, op0, tem);
3256 else if (SCALAR_INT_MODE_P (mode))
3258 /* 0/x is 0 (or x&0 if x has side-effects). */
3259 if (trueop0 == CONST0_RTX (mode)
3260 && !cfun->can_throw_non_call_exceptions)
3262 if (side_effects_p (op1))
3263 return simplify_gen_binary (AND, mode, op1, trueop0);
3264 return trueop0;
3266 /* x/1 is x. */
3267 if (trueop1 == CONST1_RTX (mode))
3269 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3270 if (tem)
3271 return tem;
3273 /* x/-1 is -x. */
3274 if (trueop1 == constm1_rtx)
3276 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3277 if (x)
3278 return simplify_gen_unary (NEG, mode, x, mode);
3281 break;
3283 case UMOD:
3284 /* 0%x is 0 (or x&0 if x has side-effects). */
3285 if (trueop0 == CONST0_RTX (mode))
3287 if (side_effects_p (op1))
3288 return simplify_gen_binary (AND, mode, op1, trueop0);
3289 return trueop0;
3291 /* x%1 is 0 (or x&0 if x has side-effects).  */
3292 if (trueop1 == CONST1_RTX (mode))
3294 if (side_effects_p (op0))
3295 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3296 return CONST0_RTX (mode);
3298 /* Implement modulus by power of two as AND. */
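/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)).  */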
3299 if (CONST_INT_P (trueop1)
3300 && exact_log2 (UINTVAL (trueop1)) > 0)
3301 return simplify_gen_binary (AND, mode, op0,
3302 gen_int_mode (INTVAL (op1) - 1, mode));
3303 break;
3305 case MOD:
3306 /* 0%x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0 == CONST0_RTX (mode))
3309 if (side_effects_p (op1))
3310 return simplify_gen_binary (AND, mode, op1, trueop0);
3311 return trueop0;
3313 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3314 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3316 if (side_effects_p (op0))
3317 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3318 return CONST0_RTX (mode);
3320 break;
3322 case ROTATERT:
3323 case ROTATE:
3324 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3325 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3326 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3327 amount instead. */
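/* E.g. for a 32-bit mode, (rotate x (const_int 24)) is canonicalized
   to (rotatert x (const_int 8)), while (rotate x (const_int 16)) is
   left as a left rotation.  */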
3328 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3329 if (CONST_INT_P (trueop1)
3330 && IN_RANGE (INTVAL (trueop1),
3331 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3332 GET_MODE_PRECISION (mode) - 1))
3333 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3334 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3335 - INTVAL (trueop1)));
3336 #endif
3337 /* FALLTHRU */
3338 case ASHIFTRT:
3339 if (trueop1 == CONST0_RTX (mode))
3340 return op0;
3341 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3342 return op0;
3343 /* Rotating ~0 always results in ~0. */
3344 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3345 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3346 && ! side_effects_p (op1))
3347 return op0;
3348 /* Given:
3349 scalar modes M1, M2
3350 scalar constants c1, c2
3351 size (M2) > size (M1)
3352 c1 == size (M2) - size (M1)
3353 optimize:
3354 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3355 <low_part>)
3356 (const_int <c2>))
3357 to:
3358 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3359 <low_part>). */
3360 if (code == ASHIFTRT
3361 && !VECTOR_MODE_P (mode)
3362 && SUBREG_P (op0)
3363 && CONST_INT_P (op1)
3364 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3365 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3366 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3367 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3368 > GET_MODE_BITSIZE (mode))
3369 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3370 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3371 - GET_MODE_BITSIZE (mode)))
3372 && subreg_lowpart_p (op0))
3374 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3375 + INTVAL (op1));
3376 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3377 tmp = simplify_gen_binary (ASHIFTRT,
3378 GET_MODE (SUBREG_REG (op0)),
3379 XEXP (SUBREG_REG (op0), 0),
3380 tmp);
3381 return lowpart_subreg (mode, tmp, inner_mode);
3383 canonicalize_shift:
3384 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3386 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3387 if (val != INTVAL (op1))
3388 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3390 break;
3392 case ASHIFT:
3393 case SS_ASHIFT:
3394 case US_ASHIFT:
3395 if (trueop1 == CONST0_RTX (mode))
3396 return op0;
3397 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3398 return op0;
3399 goto canonicalize_shift;
3401 case LSHIFTRT:
3402 if (trueop1 == CONST0_RTX (mode))
3403 return op0;
3404 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3405 return op0;
3406 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
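/* E.g. on a target where CLZ_DEFINED_VALUE_AT_ZERO yields the SImode
   precision of 32, (lshiftrt (clz:SI x) (const_int 5)) is nonzero
   exactly when the CLZ result is 32, i.e. when x is zero, so it
   simplifies to (eq x (const_int 0)).  */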
3407 if (GET_CODE (op0) == CLZ
3408 && CONST_INT_P (trueop1)
3409 && STORE_FLAG_VALUE == 1
3410 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3412 machine_mode imode = GET_MODE (XEXP (op0, 0));
3413 unsigned HOST_WIDE_INT zero_val = 0;
3415 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3416 && zero_val == GET_MODE_PRECISION (imode)
3417 && INTVAL (trueop1) == exact_log2 (zero_val))
3418 return simplify_gen_relational (EQ, mode, imode,
3419 XEXP (op0, 0), const0_rtx);
3421 goto canonicalize_shift;
3423 case SMIN:
3424 if (width <= HOST_BITS_PER_WIDE_INT
3425 && mode_signbit_p (mode, trueop1)
3426 && ! side_effects_p (op0))
3427 return op1;
3428 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3429 return op0;
3430 tem = simplify_associative_operation (code, mode, op0, op1);
3431 if (tem)
3432 return tem;
3433 break;
3435 case SMAX:
3436 if (width <= HOST_BITS_PER_WIDE_INT
3437 && CONST_INT_P (trueop1)
3438 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3439 && ! side_effects_p (op0))
3440 return op1;
3441 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3442 return op0;
3443 tem = simplify_associative_operation (code, mode, op0, op1);
3444 if (tem)
3445 return tem;
3446 break;
3448 case UMIN:
3449 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3450 return op1;
3451 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3452 return op0;
3453 tem = simplify_associative_operation (code, mode, op0, op1);
3454 if (tem)
3455 return tem;
3456 break;
3458 case UMAX:
3459 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3460 return op1;
3461 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3462 return op0;
3463 tem = simplify_associative_operation (code, mode, op0, op1);
3464 if (tem)
3465 return tem;
3466 break;
3468 case SS_PLUS:
3469 case US_PLUS:
3470 case SS_MINUS:
3471 case US_MINUS:
3472 case SS_MULT:
3473 case US_MULT:
3474 case SS_DIV:
3475 case US_DIV:
3476 /* ??? There are simplifications that can be done. */
3477 return 0;
3479 case VEC_SELECT:
3480 if (!VECTOR_MODE_P (mode))
3482 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3483 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3484 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3485 gcc_assert (XVECLEN (trueop1, 0) == 1);
3486 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3488 if (GET_CODE (trueop0) == CONST_VECTOR)
3489 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3490 (trueop1, 0, 0)));
3492 /* Extract a scalar element from a nested VEC_SELECT expression
3493 (with an optional nested VEC_CONCAT expression).  Some targets
3494 (i386) extract a scalar element from a vector using a chain of
3495 nested VEC_SELECT expressions.  When the input operand is a memory
3496 operand, this operation can be simplified to a simple scalar
3497 load from an offsetted memory address.  */
3498 if (GET_CODE (trueop0) == VEC_SELECT)
3500 rtx op0 = XEXP (trueop0, 0);
3501 rtx op1 = XEXP (trueop0, 1);
3503 machine_mode opmode = GET_MODE (op0);
3504 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3505 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3507 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3508 int elem;
3510 rtvec vec;
3511 rtx tmp_op, tmp;
3513 gcc_assert (GET_CODE (op1) == PARALLEL);
3514 gcc_assert (i < n_elts);
3516 /* Select the element pointed to by the nested selector.  */
3517 elem = INTVAL (XVECEXP (op1, 0, i));
3519 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3520 if (GET_CODE (op0) == VEC_CONCAT)
3522 rtx op00 = XEXP (op0, 0);
3523 rtx op01 = XEXP (op0, 1);
3525 machine_mode mode00, mode01;
3526 int n_elts00, n_elts01;
3528 mode00 = GET_MODE (op00);
3529 mode01 = GET_MODE (op01);
3531 /* Find out the number of elements of each operand.  */
3532 if (VECTOR_MODE_P (mode00))
3534 elt_size = GET_MODE_UNIT_SIZE (mode00);
3535 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3537 else
3538 n_elts00 = 1;
3540 if (VECTOR_MODE_P (mode01))
3542 elt_size = GET_MODE_UNIT_SIZE (mode01);
3543 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3545 else
3546 n_elts01 = 1;
3548 gcc_assert (n_elts == n_elts00 + n_elts01);
3550 /* Select the correct operand of the VEC_CONCAT
3551 and adjust the selector.  */
3552 if (elem < n_elts01)
3553 tmp_op = op00;
3554 else
3556 tmp_op = op01;
3557 elem -= n_elts00;
3560 else
3561 tmp_op = op0;
3563 vec = rtvec_alloc (1);
3564 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3566 tmp = gen_rtx_fmt_ee (code, mode,
3567 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3568 return tmp;
3570 if (GET_CODE (trueop0) == VEC_DUPLICATE
3571 && GET_MODE (XEXP (trueop0, 0)) == mode)
3572 return XEXP (trueop0, 0);
3574 else
3576 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3577 gcc_assert (GET_MODE_INNER (mode)
3578 == GET_MODE_INNER (GET_MODE (trueop0)));
3579 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3581 if (GET_CODE (trueop0) == CONST_VECTOR)
3583 int elt_size = GET_MODE_UNIT_SIZE (mode);
3584 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3585 rtvec v = rtvec_alloc (n_elts);
3586 unsigned int i;
3588 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3589 for (i = 0; i < n_elts; i++)
3591 rtx x = XVECEXP (trueop1, 0, i);
3593 gcc_assert (CONST_INT_P (x));
3594 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3595 INTVAL (x));
3598 return gen_rtx_CONST_VECTOR (mode, v);
3601 /* Recognize the identity: selecting { 0, 1, ..., n-1 } from a vector of the same mode yields the vector itself. */
3602 if (GET_MODE (trueop0) == mode)
3604 bool maybe_ident = true;
3605 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3607 rtx j = XVECEXP (trueop1, 0, i);
3608 if (!CONST_INT_P (j) || INTVAL (j) != i)
3610 maybe_ident = false;
3611 break;
3614 if (maybe_ident)
3615 return trueop0;
3618 /* If we build {a,b} then permute it, build the result directly. */
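/* E.g. with V2DF as the result mode,
   (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
   (vec_concat:V2DF c d)) (parallel [2 0]))
   becomes (vec_concat:V2DF c a). */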
3619 if (XVECLEN (trueop1, 0) == 2
3620 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3621 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3622 && GET_CODE (trueop0) == VEC_CONCAT
3623 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3624 && GET_MODE (XEXP (trueop0, 0)) == mode
3625 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3626 && GET_MODE (XEXP (trueop0, 1)) == mode)
3628 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3629 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3630 rtx subop0, subop1;
3632 gcc_assert (i0 < 4 && i1 < 4);
3633 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3634 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3636 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3639 if (XVECLEN (trueop1, 0) == 2
3640 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3641 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3642 && GET_CODE (trueop0) == VEC_CONCAT
3643 && GET_MODE (trueop0) == mode)
3645 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3646 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3647 rtx subop0, subop1;
3649 gcc_assert (i0 < 2 && i1 < 2);
3650 subop0 = XEXP (trueop0, i0);
3651 subop1 = XEXP (trueop0, i1);
3653 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3656 /* If we select one half of a vec_concat, return that. */
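/* E.g. (vec_select:V2SI (vec_concat:V4SI x y) (parallel [2 3]))
   is simply y when x and y are both V2SI. */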
3657 if (GET_CODE (trueop0) == VEC_CONCAT
3658 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3660 rtx subop0 = XEXP (trueop0, 0);
3661 rtx subop1 = XEXP (trueop0, 1);
3662 machine_mode mode0 = GET_MODE (subop0);
3663 machine_mode mode1 = GET_MODE (subop1);
3664 int li = GET_MODE_UNIT_SIZE (mode0);
3665 int l0 = GET_MODE_SIZE (mode0) / li;
3666 int l1 = GET_MODE_SIZE (mode1) / li;
3667 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3668 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3670 bool success = true;
3671 for (int i = 1; i < l0; ++i)
3673 rtx j = XVECEXP (trueop1, 0, i);
3674 if (!CONST_INT_P (j) || INTVAL (j) != i)
3676 success = false;
3677 break;
3680 if (success)
3681 return subop0;
3683 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3685 bool success = true;
3686 for (int i = 1; i < l1; ++i)
3688 rtx j = XVECEXP (trueop1, 0, i);
3689 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3691 success = false;
3692 break;
3695 if (success)
3696 return subop1;
3701 if (XVECLEN (trueop1, 0) == 1
3702 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3703 && GET_CODE (trueop0) == VEC_CONCAT)
3705 rtx vec = trueop0;
3706 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3708 /* Try to find the element in the VEC_CONCAT. */
3709 while (GET_MODE (vec) != mode
3710 && GET_CODE (vec) == VEC_CONCAT)
3712 HOST_WIDE_INT vec_size;
3714 if (CONST_INT_P (XEXP (vec, 0)))
3716 /* vec_concat of two const_ints doesn't make sense with
3717 respect to modes. */
3718 if (CONST_INT_P (XEXP (vec, 1)))
3719 return 0;
3721 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3722 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3724 else
3725 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3727 if (offset < vec_size)
3728 vec = XEXP (vec, 0);
3729 else
3731 offset -= vec_size;
3732 vec = XEXP (vec, 1);
3734 vec = avoid_constant_pool_reference (vec);
3737 if (GET_MODE (vec) == mode)
3738 return vec;
3741 /* If we select elements in a vec_merge that all come from the same
3742 operand, select from that operand directly. */
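/* E.g. in (vec_merge:V4SI x y (const_int 3)) lanes 0 and 1 come
   from x and lanes 2 and 3 from y, so a select that only reads
   lanes 2 and 3 can be applied directly to y. */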
3743 if (GET_CODE (op0) == VEC_MERGE)
3745 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3746 if (CONST_INT_P (trueop02))
3748 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3749 bool all_operand0 = true;
3750 bool all_operand1 = true;
3751 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3753 rtx j = XVECEXP (trueop1, 0, i);
3754 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3755 all_operand1 = false;
3756 else
3757 all_operand0 = false;
3759 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3760 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3761 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3762 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3766 /* If we have two nested selects that are inverses of each
3767 other, replace them with the source operand. */
3768 if (GET_CODE (trueop0) == VEC_SELECT
3769 && GET_MODE (XEXP (trueop0, 0)) == mode)
3771 rtx op0_subop1 = XEXP (trueop0, 1);
3772 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3773 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3775 /* Apply the outer ordering vector to the inner one. (The inner
3776 ordering vector is expressly permitted to be of a different
3777 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3778 then the two VEC_SELECTs cancel. */
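/* E.g. two successive reversing permutes of a V4SI vector, each
   with selector [3 2 1 0], compose to the identity, so the inner
   source vector is returned unchanged. */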
3779 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3781 rtx x = XVECEXP (trueop1, 0, i);
3782 if (!CONST_INT_P (x))
3783 return 0;
3784 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3785 if (!CONST_INT_P (y) || i != INTVAL (y))
3786 return 0;
3788 return XEXP (trueop0, 0);
3791 return 0;
3792 case VEC_CONCAT:
3794 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3795 ? GET_MODE (trueop0)
3796 : GET_MODE_INNER (mode));
3797 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3798 ? GET_MODE (trueop1)
3799 : GET_MODE_INNER (mode));
3801 gcc_assert (VECTOR_MODE_P (mode));
3802 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3803 == GET_MODE_SIZE (mode));
3805 if (VECTOR_MODE_P (op0_mode))
3806 gcc_assert (GET_MODE_INNER (mode)
3807 == GET_MODE_INNER (op0_mode));
3808 else
3809 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3811 if (VECTOR_MODE_P (op1_mode))
3812 gcc_assert (GET_MODE_INNER (mode)
3813 == GET_MODE_INNER (op1_mode));
3814 else
3815 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
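/* Fold a concatenation of constant operands (scalar or vector)
   into a single CONST_VECTOR. */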
3817 if ((GET_CODE (trueop0) == CONST_VECTOR
3818 || CONST_SCALAR_INT_P (trueop0)
3819 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3820 && (GET_CODE (trueop1) == CONST_VECTOR
3821 || CONST_SCALAR_INT_P (trueop1)
3822 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3824 int elt_size = GET_MODE_UNIT_SIZE (mode);
3825 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3826 rtvec v = rtvec_alloc (n_elts);
3827 unsigned int i;
3828 unsigned in_n_elts = 1;
3830 if (VECTOR_MODE_P (op0_mode))
3831 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3832 for (i = 0; i < n_elts; i++)
3834 if (i < in_n_elts)
3836 if (!VECTOR_MODE_P (op0_mode))
3837 RTVEC_ELT (v, i) = trueop0;
3838 else
3839 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3841 else
3843 if (!VECTOR_MODE_P (op1_mode))
3844 RTVEC_ELT (v, i) = trueop1;
3845 else
3846 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3847 i - in_n_elts);
3851 return gen_rtx_CONST_VECTOR (mode, v);
3854 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3855 Restrict the transformation to avoid generating a VEC_SELECT with a
3856 mode unrelated to its operand. */
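/* E.g. (vec_concat:V2DI (vec_select:DI x (parallel [1]))
   (vec_select:DI x (parallel [0]))) becomes the single swap
   (vec_select:V2DI x (parallel [1 0])) when x is V2DI. */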
3857 if (GET_CODE (trueop0) == VEC_SELECT
3858 && GET_CODE (trueop1) == VEC_SELECT
3859 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3860 && GET_MODE (XEXP (trueop0, 0)) == mode)
3862 rtx par0 = XEXP (trueop0, 1);
3863 rtx par1 = XEXP (trueop1, 1);
3864 int len0 = XVECLEN (par0, 0);
3865 int len1 = XVECLEN (par1, 0);
3866 rtvec vec = rtvec_alloc (len0 + len1);
3867 for (int i = 0; i < len0; i++)
3868 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3869 for (int i = 0; i < len1; i++)
3870 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3871 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3872 gen_rtx_PARALLEL (VOIDmode, vec));
3875 return 0;
3877 default:
3878 gcc_unreachable ();
3881 return 0;
3885 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3886 rtx op0, rtx op1)
3888 unsigned int width = GET_MODE_PRECISION (mode);
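/* Fold an elementwise binary operation on two CONST_VECTORs by
   simplifying each pair of corresponding elements. */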
3890 if (VECTOR_MODE_P (mode)
3891 && code != VEC_CONCAT
3892 && GET_CODE (op0) == CONST_VECTOR
3893 && GET_CODE (op1) == CONST_VECTOR)
3895 unsigned n_elts = GET_MODE_NUNITS (mode);
3896 machine_mode op0mode = GET_MODE (op0);
3897 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3898 machine_mode op1mode = GET_MODE (op1);
3899 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3900 rtvec v = rtvec_alloc (n_elts);
3901 unsigned int i;
3903 gcc_assert (op0_n_elts == n_elts);
3904 gcc_assert (op1_n_elts == n_elts);
3905 for (i = 0; i < n_elts; i++)
3907 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3908 CONST_VECTOR_ELT (op0, i),
3909 CONST_VECTOR_ELT (op1, i));
3910 if (!x)
3911 return 0;
3912 RTVEC_ELT (v, i) = x;
3915 return gen_rtx_CONST_VECTOR (mode, v);
3918 if (VECTOR_MODE_P (mode)
3919 && code == VEC_CONCAT
3920 && (CONST_SCALAR_INT_P (op0)
3921 || GET_CODE (op0) == CONST_FIXED
3922 || CONST_DOUBLE_AS_FLOAT_P (op0))
3923 && (CONST_SCALAR_INT_P (op1)
3924 || CONST_DOUBLE_AS_FLOAT_P (op1)
3925 || GET_CODE (op1) == CONST_FIXED))
3927 unsigned n_elts = GET_MODE_NUNITS (mode);
3928 rtvec v = rtvec_alloc (n_elts);
3930 gcc_assert (n_elts >= 2);
3931 if (n_elts == 2)
3933 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3934 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3936 RTVEC_ELT (v, 0) = op0;
3937 RTVEC_ELT (v, 1) = op1;
3939 else
3941 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3942 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3943 unsigned i;
3945 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3946 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3947 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3949 for (i = 0; i < op0_n_elts; ++i)
3950 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3951 for (i = 0; i < op1_n_elts; ++i)
3952 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3955 return gen_rtx_CONST_VECTOR (mode, v);
3958 if (SCALAR_FLOAT_MODE_P (mode)
3959 && CONST_DOUBLE_AS_FLOAT_P (op0)
3960 && CONST_DOUBLE_AS_FLOAT_P (op1)
3961 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
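/* Bitwise operations on floating-point constants are folded on the
   target bit patterns, e.g. for sign-bit manipulation idioms such
   as fabs and fneg. */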
3963 if (code == AND
3964 || code == IOR
3965 || code == XOR)
3967 long tmp0[4];
3968 long tmp1[4];
3969 REAL_VALUE_TYPE r;
3970 int i;
3972 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3973 GET_MODE (op0));
3974 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3975 GET_MODE (op1));
3976 for (i = 0; i < 4; i++)
3978 switch (code)
3980 case AND:
3981 tmp0[i] &= tmp1[i];
3982 break;
3983 case IOR:
3984 tmp0[i] |= tmp1[i];
3985 break;
3986 case XOR:
3987 tmp0[i] ^= tmp1[i];
3988 break;
3989 default:
3990 gcc_unreachable ();
3993 real_from_target (&r, tmp0, mode);
3994 return const_double_from_real_value (r, mode);
3996 else
3998 REAL_VALUE_TYPE f0, f1, value, result;
3999 const REAL_VALUE_TYPE *opr0, *opr1;
4000 bool inexact;
4002 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4003 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4005 if (HONOR_SNANS (mode)
4006 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4007 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4008 return 0;
4010 real_convert (&f0, mode, opr0);
4011 real_convert (&f1, mode, opr1);
4013 if (code == DIV
4014 && real_equal (&f1, &dconst0)
4015 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4016 return 0;
4018 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4019 && flag_trapping_math
4020 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4022 int s0 = REAL_VALUE_NEGATIVE (f0);
4023 int s1 = REAL_VALUE_NEGATIVE (f1);
4025 switch (code)
4027 case PLUS:
4028 /* Inf + -Inf = NaN plus exception. */
4029 if (s0 != s1)
4030 return 0;
4031 break;
4032 case MINUS:
4033 /* Inf - Inf = NaN plus exception. */
4034 if (s0 == s1)
4035 return 0;
4036 break;
4037 case DIV:
4038 /* Inf / Inf = NaN plus exception. */
4039 return 0;
4040 default:
4041 break;
4045 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4046 && flag_trapping_math
4047 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4048 || (REAL_VALUE_ISINF (f1)
4049 && real_equal (&f0, &dconst0))))
4050 /* Inf * 0 = NaN plus exception. */
4051 return 0;
4053 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4054 &f0, &f1);
4055 real_convert (&result, mode, &value);
4057 /* Don't constant fold this floating point operation if
4058 the result has overflowed and flag_trapping_math is set. */
4060 if (flag_trapping_math
4061 && MODE_HAS_INFINITIES (mode)
4062 && REAL_VALUE_ISINF (result)
4063 && !REAL_VALUE_ISINF (f0)
4064 && !REAL_VALUE_ISINF (f1))
4065 /* Overflow plus exception. */
4066 return 0;
4068 /* Don't constant fold this floating point operation if the
4069 result may depend upon the run-time rounding mode and
4070 flag_rounding_math is set, or if GCC's software emulation
4071 is unable to accurately represent the result. */
4073 if ((flag_rounding_math
4074 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4075 && (inexact || !real_identical (&result, &value)))
4076 return NULL_RTX;
4078 return const_double_from_real_value (result, mode);
4082 /* We can fold some multi-word operations. */
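/* E.g. a TImode addition of two CONST_WIDE_INTs is folded here via
   the wide_int machinery, which handles values wider than a host
   word. */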
4083 if ((GET_MODE_CLASS (mode) == MODE_INT
4084 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4085 && CONST_SCALAR_INT_P (op0)
4086 && CONST_SCALAR_INT_P (op1))
4088 wide_int result;
4089 bool overflow;
4090 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4091 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4093 #if TARGET_SUPPORTS_WIDE_INT == 0
4094 /* This assert keeps the simplification from producing a result
4095 that cannot be represented in a CONST_DOUBLE, but a lot of
4096 upstream callers expect that this function never fails to
4097 simplify something, so if you added this check to the test
4098 above the code would just die later anyway. If this assert
4099 fires, you need to make the port support wide int. */
4100 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4101 #endif
4102 switch (code)
4104 case MINUS:
4105 result = wi::sub (pop0, pop1);
4106 break;
4108 case PLUS:
4109 result = wi::add (pop0, pop1);
4110 break;
4112 case MULT:
4113 result = wi::mul (pop0, pop1);
4114 break;
4116 case DIV:
4117 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4118 if (overflow)
4119 return NULL_RTX;
4120 break;
4122 case MOD:
4123 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4124 if (overflow)
4125 return NULL_RTX;
4126 break;
4128 case UDIV:
4129 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4130 if (overflow)
4131 return NULL_RTX;
4132 break;
4134 case UMOD:
4135 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4136 if (overflow)
4137 return NULL_RTX;
4138 break;
4140 case AND:
4141 result = wi::bit_and (pop0, pop1);
4142 break;
4144 case IOR:
4145 result = wi::bit_or (pop0, pop1);
4146 break;
4148 case XOR:
4149 result = wi::bit_xor (pop0, pop1);
4150 break;
4152 case SMIN:
4153 result = wi::smin (pop0, pop1);
4154 break;
4156 case SMAX:
4157 result = wi::smax (pop0, pop1);
4158 break;
4160 case UMIN:
4161 result = wi::umin (pop0, pop1);
4162 break;
4164 case UMAX:
4165 result = wi::umax (pop0, pop1);
4166 break;
4168 case LSHIFTRT:
4169 case ASHIFTRT:
4170 case ASHIFT:
4172 wide_int wop1 = pop1;
4173 if (SHIFT_COUNT_TRUNCATED)
4174 wop1 = wi::umod_trunc (wop1, width);
4175 else if (wi::geu_p (wop1, width))
4176 return NULL_RTX;
4178 switch (code)
4180 case LSHIFTRT:
4181 result = wi::lrshift (pop0, wop1);
4182 break;
4184 case ASHIFTRT:
4185 result = wi::arshift (pop0, wop1);
4186 break;
4188 case ASHIFT:
4189 result = wi::lshift (pop0, wop1);
4190 break;
4192 default:
4193 gcc_unreachable ();
4195 break;
4197 case ROTATE:
4198 case ROTATERT:
4200 if (wi::neg_p (pop1))
4201 return NULL_RTX;
4203 switch (code)
4205 case ROTATE:
4206 result = wi::lrotate (pop0, pop1);
4207 break;
4209 case ROTATERT:
4210 result = wi::rrotate (pop0, pop1);
4211 break;
4213 default:
4214 gcc_unreachable ();
4216 break;
4218 default:
4219 return NULL_RTX;
4221 return immed_wide_int_const (result, mode);
4224 return NULL_RTX;
4229 /* Return a positive integer if X should sort after Y. The value
4230 returned is 1 if and only if X and Y are both regs. */
4232 static int
4233 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4235 int result;
4237 result = (commutative_operand_precedence (y)
4238 - commutative_operand_precedence (x));
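/* Double a nonzero precedence difference so that it can never be
   1; the value 1 is reserved for the both-registers case below,
   which callers treat as mere register re-ordering. */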
4239 if (result)
4240 return result + result;
4242 /* Group together equal REGs to do more simplification. */
4243 if (REG_P (x) && REG_P (y))
4244 return REGNO (x) > REGNO (y);
4246 return 0;
4249 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4250 operands may be another PLUS or MINUS.
4252 Rather than testing for specific cases, we do this by a brute-force method
4253 and do all possible simplifications until no more changes occur. Then
4254 we rebuild the operation.
4256 May return NULL_RTX when no changes were made. */
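/* E.g. (plus (minus x y) y) expands to the operand list
   {x, y, -y}; the y/-y pair cancels and plain x is returned. */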
4258 static rtx
4259 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4260 rtx op1)
4262 struct simplify_plus_minus_op_data
4264 rtx op;
4265 short neg;
4266 } ops[16];
4267 rtx result, tem;
4268 int n_ops = 2;
4269 int changed, n_constants, canonicalized = 0;
4270 int i, j;
4272 memset (ops, 0, sizeof ops);
4274 /* Set up the two operands and then expand them until nothing has been
4275 changed. If we run out of room in our array, give up; this should
4276 almost never happen. */
4278 ops[0].op = op0;
4279 ops[0].neg = 0;
4280 ops[1].op = op1;
4281 ops[1].neg = (code == MINUS);
4285 changed = 0;
4286 n_constants = 0;
4288 for (i = 0; i < n_ops; i++)
4290 rtx this_op = ops[i].op;
4291 int this_neg = ops[i].neg;
4292 enum rtx_code this_code = GET_CODE (this_op);
4294 switch (this_code)
4296 case PLUS:
4297 case MINUS:
4298 if (n_ops == ARRAY_SIZE (ops))
4299 return NULL_RTX;
4301 ops[n_ops].op = XEXP (this_op, 1);
4302 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4303 n_ops++;
4305 ops[i].op = XEXP (this_op, 0);
4306 changed = 1;
4307 /* If this operand was negated then we will potentially
4308 canonicalize the expression. Similarly if we don't
4309 place the operands adjacently, we're re-ordering the
4310 expression and thus might be performing a
4311 canonicalization. Ignore register re-ordering.
4312 ??? It might be better to shuffle the ops array here,
4313 but then (plus (plus A B) (plus C D)) wouldn't
4314 be seen as non-canonical. */
4315 if (this_neg
4316 || (i != n_ops - 2
4317 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4318 canonicalized = 1;
4319 break;
4321 case NEG:
4322 ops[i].op = XEXP (this_op, 0);
4323 ops[i].neg = ! this_neg;
4324 changed = 1;
4325 canonicalized = 1;
4326 break;
4328 case CONST:
4329 if (n_ops != ARRAY_SIZE (ops)
4330 && GET_CODE (XEXP (this_op, 0)) == PLUS
4331 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4332 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4334 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4335 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4336 ops[n_ops].neg = this_neg;
4337 n_ops++;
4338 changed = 1;
4339 canonicalized = 1;
4341 break;
4343 case NOT:
4344 /* ~a -> (-a - 1) */
4345 if (n_ops != ARRAY_SIZE (ops))
4347 ops[n_ops].op = CONSTM1_RTX (mode);
4348 ops[n_ops++].neg = this_neg;
4349 ops[i].op = XEXP (this_op, 0);
4350 ops[i].neg = !this_neg;
4351 changed = 1;
4352 canonicalized = 1;
4354 break;
4356 case CONST_INT:
4357 n_constants++;
4358 if (this_neg)
4360 ops[i].op = neg_const_int (mode, this_op);
4361 ops[i].neg = 0;
4362 changed = 1;
4363 canonicalized = 1;
4365 break;
4367 default:
4368 break;
4372 while (changed);
4374 if (n_constants > 1)
4375 canonicalized = 1;
4377 gcc_assert (n_ops >= 2);
4379 /* If we only have two operands, we can avoid the loops. */
4380 if (n_ops == 2)
4382 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4383 rtx lhs, rhs;
4385 /* Get the two operands. Be careful with the order, especially for
4386 the cases where code == MINUS. */
4387 if (ops[0].neg && ops[1].neg)
4389 lhs = gen_rtx_NEG (mode, ops[0].op);
4390 rhs = ops[1].op;
4392 else if (ops[0].neg)
4394 lhs = ops[1].op;
4395 rhs = ops[0].op;
4397 else
4399 lhs = ops[0].op;
4400 rhs = ops[1].op;
4403 return simplify_const_binary_operation (code, mode, lhs, rhs);
4406 /* Now simplify each pair of operands until nothing changes. */
4407 while (1)
4409 /* Insertion sort is good enough for a small array. */
4410 for (i = 1; i < n_ops; i++)
4412 struct simplify_plus_minus_op_data save;
4413 int cmp;
4415 j = i - 1;
4416 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4417 if (cmp <= 0)
4418 continue;
4419 /* Just swapping registers doesn't count as canonicalization. */
4420 if (cmp != 1)
4421 canonicalized = 1;
4423 save = ops[i];
4425 ops[j + 1] = ops[j];
4426 while (j--
4427 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4428 ops[j + 1] = save;
4431 changed = 0;
4432 for (i = n_ops - 1; i > 0; i--)
4433 for (j = i - 1; j >= 0; j--)
4435 rtx lhs = ops[j].op, rhs = ops[i].op;
4436 int lneg = ops[j].neg, rneg = ops[i].neg;
4438 if (lhs != 0 && rhs != 0)
4440 enum rtx_code ncode = PLUS;
4442 if (lneg != rneg)
4444 ncode = MINUS;
4445 if (lneg)
4446 std::swap (lhs, rhs);
4448 else if (swap_commutative_operands_p (lhs, rhs))
4449 std::swap (lhs, rhs);
4451 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4452 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4454 rtx tem_lhs, tem_rhs;
4456 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4457 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4458 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4459 tem_rhs);
4461 if (tem && !CONSTANT_P (tem))
4462 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4464 else
4465 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4467 if (tem)
4469 /* Reject "simplifications" that just wrap the two
4470 arguments in a CONST. Failure to do so can result
4471 in infinite recursion with simplify_binary_operation
4472 when it calls us to simplify CONST operations.
4473 Also, if we find such a simplification, don't try
4474 any more combinations with this rhs: We must have
4475 something like symbol+offset, i.e. one of the
4476 trivial CONST expressions we handle later. */
4477 if (GET_CODE (tem) == CONST
4478 && GET_CODE (XEXP (tem, 0)) == ncode
4479 && XEXP (XEXP (tem, 0), 0) == lhs
4480 && XEXP (XEXP (tem, 0), 1) == rhs)
4481 break;
4482 lneg &= rneg;
4483 if (GET_CODE (tem) == NEG)
4484 tem = XEXP (tem, 0), lneg = !lneg;
4485 if (CONST_INT_P (tem) && lneg)
4486 tem = neg_const_int (mode, tem), lneg = 0;
4488 ops[i].op = tem;
4489 ops[i].neg = lneg;
4490 ops[j].op = NULL_RTX;
4491 changed = 1;
4492 canonicalized = 1;
4497 if (!changed)
4498 break;
4500 /* Pack all the operands to the lower-numbered entries. */
4501 for (i = 0, j = 0; j < n_ops; j++)
4502 if (ops[j].op)
4504 ops[i] = ops[j];
4505 i++;
4507 n_ops = i;
4510 /* If nothing changed, check whether rematerialization of the rtl
4511 instructions is still required. */
4512 if (!canonicalized)
4514 /* Perform rematerialization only if all operands are registers and
4515 all operations are PLUS. */
4516 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4517 around rs6000 and how it uses the CA register. See PR67145. */
4518 for (i = 0; i < n_ops; i++)
4519 if (ops[i].neg
4520 || !REG_P (ops[i].op)
4521 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4522 && fixed_regs[REGNO (ops[i].op)]
4523 && !global_regs[REGNO (ops[i].op)]
4524 && ops[i].op != frame_pointer_rtx
4525 && ops[i].op != arg_pointer_rtx
4526 && ops[i].op != stack_pointer_rtx))
4527 return NULL_RTX;
4528 goto gen_result;
4531 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4532 if (n_ops == 2
4533 && CONST_INT_P (ops[1].op)
4534 && CONSTANT_P (ops[0].op)
4535 && ops[0].neg)
4536 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4538 /* We suppressed creation of trivial CONST expressions in the
4539 combination loop to avoid recursion. Create one manually now.
4540 The combination loop should have ensured that there is exactly
4541 one CONST_INT, and the sort will have ensured that it is last
4542 in the array and that any other constant will be next-to-last. */
4544 if (n_ops > 1
4545 && CONST_INT_P (ops[n_ops - 1].op)
4546 && CONSTANT_P (ops[n_ops - 2].op))
4548 rtx value = ops[n_ops - 1].op;
4549 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4550 value = neg_const_int (mode, value);
4551 if (CONST_INT_P (value))
4553 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4554 INTVAL (value));
4555 n_ops--;
4559 /* Put a non-negated operand first, if possible. */
4561 for (i = 0; i < n_ops && ops[i].neg; i++)
4562 continue;
4563 if (i == n_ops)
4564 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4565 else if (i != 0)
4567 tem = ops[0].op;
4568 ops[0] = ops[i];
4569 ops[i].op = tem;
4570 ops[i].neg = 1;
4573 /* Now make the result by performing the requested operations. */
4574 gen_result:
4575 result = ops[0].op;
4576 for (i = 1; i < n_ops; i++)
4577 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4578 mode, result, ops[i].op);
4580 return result;
4583 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4584 static bool
4585 plus_minus_operand_p (const_rtx x)
4587 return GET_CODE (x) == PLUS
4588 || GET_CODE (x) == MINUS
4589 || (GET_CODE (x) == CONST
4590 && GET_CODE (XEXP (x, 0)) == PLUS
4591 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4592 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4595 /* Like simplify_binary_operation except used for relational operators.
4596 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4597 not both be VOIDmode as well.
4599 CMP_MODE specifies the mode in which the comparison is done, so it is
4600 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4601 the operands or, if both are VOIDmode, the operands are compared in
4602 "infinite precision". */
4604 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4605 machine_mode cmp_mode, rtx op0, rtx op1)
4607 rtx tem, trueop0, trueop1;
4609 if (cmp_mode == VOIDmode)
4610 cmp_mode = GET_MODE (op0);
4611 if (cmp_mode == VOIDmode)
4612 cmp_mode = GET_MODE (op1);
4614 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4615 if (tem)
4617 if (SCALAR_FLOAT_MODE_P (mode))
4619 if (tem == const0_rtx)
4620 return CONST0_RTX (mode);
4621 #ifdef FLOAT_STORE_FLAG_VALUE
4623 REAL_VALUE_TYPE val;
4624 val = FLOAT_STORE_FLAG_VALUE (mode);
4625 return const_double_from_real_value (val, mode);
4627 #else
4628 return NULL_RTX;
4629 #endif
4631 if (VECTOR_MODE_P (mode))
4633 if (tem == const0_rtx)
4634 return CONST0_RTX (mode);
4635 #ifdef VECTOR_STORE_FLAG_VALUE
4637 int i, units;
4638 rtvec v;
4640 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4641 if (val == NULL_RTX)
4642 return NULL_RTX;
4643 if (val == const1_rtx)
4644 return CONST1_RTX (mode);
4646 units = GET_MODE_NUNITS (mode);
4647 v = rtvec_alloc (units);
4648 for (i = 0; i < units; i++)
4649 RTVEC_ELT (v, i) = val;
4650 return gen_rtx_raw_CONST_VECTOR (mode, v);
4652 #else
4653 return NULL_RTX;
4654 #endif
4657 return tem;
4660 /* For the following tests, ensure const0_rtx is op1. */
4661 if (swap_commutative_operands_p (op0, op1)
4662 || (op0 == const0_rtx && op1 != const0_rtx))
4663 std::swap (op0, op1), code = swap_condition (code);
4665 /* If op0 is a compare, extract the comparison arguments from it. */
4666 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4667 return simplify_gen_relational (code, mode, VOIDmode,
4668 XEXP (op0, 0), XEXP (op0, 1));
4670 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4671 || CC0_P (op0))
4672 return NULL_RTX;
4674 trueop0 = avoid_constant_pool_reference (op0);
4675 trueop1 = avoid_constant_pool_reference (op1);
4676 return simplify_relational_operation_1 (code, mode, cmp_mode,
4677 trueop0, trueop1);
4680 /* This part of simplify_relational_operation is only used when CMP_MODE
4681 is not in class MODE_CC (i.e. it is a real comparison).
4683 MODE is the mode of the result, while CMP_MODE specifies the mode
4684 in which the comparison is done, so it is the mode of the operands. */
4686 static rtx
4687 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4688 machine_mode cmp_mode, rtx op0, rtx op1)
4690 enum rtx_code op0code = GET_CODE (op0);
4692 if (op1 == const0_rtx && COMPARISON_P (op0))
4694 /* If op0 is a comparison, extract the comparison arguments
4695 from it. */
4696 if (code == NE)
4698 if (GET_MODE (op0) == mode)
4699 return simplify_rtx (op0);
4700 else
4701 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4702 XEXP (op0, 0), XEXP (op0, 1));
4704 else if (code == EQ)
4706 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4707 if (new_code != UNKNOWN)
4708 return simplify_gen_relational (new_code, mode, VOIDmode,
4709 XEXP (op0, 0), XEXP (op0, 1));
4713 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4714 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
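/* E.g. (ltu (plus a 5) 5) is true exactly when the addition wraps,
   i.e. when a >= -5 in unsigned arithmetic, so it becomes
   (geu a -5). */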
4715 if ((code == LTU || code == GEU)
4716 && GET_CODE (op0) == PLUS
4717 && CONST_INT_P (XEXP (op0, 1))
4718 && (rtx_equal_p (op1, XEXP (op0, 0))
4719 || rtx_equal_p (op1, XEXP (op0, 1)))
4720 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4721 && XEXP (op0, 1) != const0_rtx)
4723 rtx new_cmp
4724 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4725 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4726 cmp_mode, XEXP (op0, 0), new_cmp);
4729 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4730 transformed into (LTU a -C). */
4731 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4732 && CONST_INT_P (XEXP (op0, 1))
4733 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4734 && XEXP (op0, 1) != const0_rtx)
4736 rtx new_cmp
4737 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4738 return simplify_gen_relational (LTU, mode, cmp_mode,
4739 XEXP (op0, 0), new_cmp);
4742 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4743 if ((code == LTU || code == GEU)
4744 && GET_CODE (op0) == PLUS
4745 && rtx_equal_p (op1, XEXP (op0, 1))
4746 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4747 && !rtx_equal_p (op1, XEXP (op0, 0)))
4748 return simplify_gen_relational (code, mode, cmp_mode, op0,
4749 copy_rtx (XEXP (op0, 0)));
4751 if (op1 == const0_rtx)
4753 /* Canonicalize (GTU x 0) as (NE x 0). */
4754 if (code == GTU)
4755 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4756 /* Canonicalize (LEU x 0) as (EQ x 0). */
4757 if (code == LEU)
4758 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4760 else if (op1 == const1_rtx)
4762 switch (code)
4764 case GE:
4765 /* Canonicalize (GE x 1) as (GT x 0). */
4766 return simplify_gen_relational (GT, mode, cmp_mode,
4767 op0, const0_rtx);
4768 case GEU:
4769 /* Canonicalize (GEU x 1) as (NE x 0). */
4770 return simplify_gen_relational (NE, mode, cmp_mode,
4771 op0, const0_rtx);
4772 case LT:
4773 /* Canonicalize (LT x 1) as (LE x 0). */
4774 return simplify_gen_relational (LE, mode, cmp_mode,
4775 op0, const0_rtx);
4776 case LTU:
4777 /* Canonicalize (LTU x 1) as (EQ x 0). */
4778 return simplify_gen_relational (EQ, mode, cmp_mode,
4779 op0, const0_rtx);
4780 default:
4781 break;
4784 else if (op1 == constm1_rtx)
4786 /* Canonicalize (LE x -1) as (LT x 0). */
4787 if (code == LE)
4788 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4789 /* Canonicalize (GT x -1) as (GE x 0). */
4790 if (code == GT)
4791 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4794 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
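/* E.g. (eq (plus x 3) 7) becomes (eq x 4). */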
4795 if ((code == EQ || code == NE)
4796 && (op0code == PLUS || op0code == MINUS)
4797 && CONSTANT_P (op1)
4798 && CONSTANT_P (XEXP (op0, 1))
4799 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4801 rtx x = XEXP (op0, 0);
4802 rtx c = XEXP (op0, 1);
4803 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4804 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4806 /* Detect an infinite recursive condition, where we oscillate at this
4807 simplification case between:
4808 A + B == C <---> C - B == A,
4809 where A, B, and C are all non-simplifiable constant expressions,
4810 usually SYMBOL_REFs. */
4811 if (GET_CODE (tem) == invcode
4812 && CONSTANT_P (x)
4813 && rtx_equal_p (c, XEXP (tem, 1)))
4814 return NULL_RTX;
4816 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4819 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4820 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4821 if (code == NE
4822 && op1 == const0_rtx
4823 && GET_MODE_CLASS (mode) == MODE_INT
4824 && cmp_mode != VOIDmode
4825 /* ??? Work-around BImode bugs in the ia64 backend. */
4826 && mode != BImode
4827 && cmp_mode != BImode
4828 && nonzero_bits (op0, cmp_mode) == 1
4829 && STORE_FLAG_VALUE == 1)
4830 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4831 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4832 : lowpart_subreg (mode, op0, cmp_mode);
4834 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4835 if ((code == EQ || code == NE)
4836 && op1 == const0_rtx
4837 && op0code == XOR)
4838 return simplify_gen_relational (code, mode, cmp_mode,
4839 XEXP (op0, 0), XEXP (op0, 1));
4841 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4842 if ((code == EQ || code == NE)
4843 && op0code == XOR
4844 && rtx_equal_p (XEXP (op0, 0), op1)
4845 && !side_effects_p (XEXP (op0, 0)))
4846 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4847 CONST0_RTX (mode));
4849 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4850 if ((code == EQ || code == NE)
4851 && op0code == XOR
4852 && rtx_equal_p (XEXP (op0, 1), op1)
4853 && !side_effects_p (XEXP (op0, 1)))
4854 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4855 CONST0_RTX (mode));
4857 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4858 if ((code == EQ || code == NE)
4859 && op0code == XOR
4860 && CONST_SCALAR_INT_P (op1)
4861 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4862 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4863 simplify_gen_binary (XOR, cmp_mode,
4864 XEXP (op0, 1), op1));
4866 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4867 can be implemented with a BICS instruction on some targets, or
4868 constant-folded if y is a constant. */
4869 if ((code == EQ || code == NE)
4870 && op0code == AND
4871 && rtx_equal_p (XEXP (op0, 0), op1)
4872 && !side_effects_p (op1)
4873 && op1 != CONST0_RTX (cmp_mode))
4875 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4876 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4878 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4879 CONST0_RTX (cmp_mode));
4882 /* Likewise for (eq/ne (and x y) y). */
4883 if ((code == EQ || code == NE)
4884 && op0code == AND
4885 && rtx_equal_p (XEXP (op0, 1), op1)
4886 && !side_effects_p (op1)
4887 && op1 != CONST0_RTX (cmp_mode))
4889 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4890 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4892 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4893 CONST0_RTX (cmp_mode));
4896 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4897 if ((code == EQ || code == NE)
4898 && GET_CODE (op0) == BSWAP
4899 && CONST_SCALAR_INT_P (op1))
4900 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4901 simplify_gen_unary (BSWAP, cmp_mode,
4902 op1, cmp_mode));
4904 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4905 if ((code == EQ || code == NE)
4906 && GET_CODE (op0) == BSWAP
4907 && GET_CODE (op1) == BSWAP)
4908 return simplify_gen_relational (code, mode, cmp_mode,
4909 XEXP (op0, 0), XEXP (op1, 0));
4911 if (op0code == POPCOUNT && op1 == const0_rtx)
4912 switch (code)
4914 case EQ:
4915 case LE:
4916 case LEU:
4917 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4918 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4919 XEXP (op0, 0), const0_rtx);
4921 case NE:
4922 case GT:
4923 case GTU:
4924 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4925 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4926 XEXP (op0, 0), const0_rtx);
4928 default:
4929 break;
4932 return NULL_RTX;
4935 enum
4937 CMP_EQ = 1,
4938 CMP_LT = 2,
4939 CMP_GT = 4,
4940 CMP_LTU = 8,
4941 CMP_GTU = 16
4945 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4946 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4947 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4948 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4949 For floating-point comparisons, assume that the operands were ordered. */
4951 static rtx
4952 comparison_result (enum rtx_code code, int known_results)
4954 switch (code)
4956 case EQ:
4957 case UNEQ:
4958 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4959 case NE:
4960 case LTGT:
4961 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4963 case LT:
4964 case UNLT:
4965 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4966 case GE:
4967 case UNGE:
4968 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4970 case GT:
4971 case UNGT:
4972 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4973 case LE:
4974 case UNLE:
4975 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4977 case LTU:
4978 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4979 case GEU:
4980 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4982 case GTU:
4983 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4984 case LEU:
4985 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4987 case ORDERED:
4988 return const_true_rtx;
4989 case UNORDERED:
4990 return const0_rtx;
4991 default:
4992 gcc_unreachable ();
4996 /* Check if the given comparison (done in the given MODE) is actually
4997 a tautology or a contradiction. If the mode is VOIDmode, the
4998 comparison is done in "infinite precision". If no simplification
4999 is possible, this function returns zero. Otherwise, it returns
5000 either const_true_rtx or const0_rtx. */
5003 simplify_const_relational_operation (enum rtx_code code,
5004 machine_mode mode,
5005 rtx op0, rtx op1)
5007 rtx tem;
5008 rtx trueop0;
5009 rtx trueop1;
5011 gcc_assert (mode != VOIDmode
5012 || (GET_MODE (op0) == VOIDmode
5013 && GET_MODE (op1) == VOIDmode));
5015 /* If op0 is a compare, extract the comparison arguments from it. */
5016 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5018 op1 = XEXP (op0, 1);
5019 op0 = XEXP (op0, 0);
5021 if (GET_MODE (op0) != VOIDmode)
5022 mode = GET_MODE (op0);
5023 else if (GET_MODE (op1) != VOIDmode)
5024 mode = GET_MODE (op1);
5025 else
5026 return 0;
5029 /* We can't simplify MODE_CC values since we don't know what the
5030 actual comparison is. */
5031 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5032 return 0;
5034 /* Make sure the constant is second. */
5035 if (swap_commutative_operands_p (op0, op1))
5037 std::swap (op0, op1);
5038 code = swap_condition (code);
5041 trueop0 = avoid_constant_pool_reference (op0);
5042 trueop1 = avoid_constant_pool_reference (op1);
5044 /* For integer comparisons of A and B maybe we can simplify A - B and can
5045 then simplify a comparison of that with zero. If A and B are both either
5046 a register or a CONST_INT, this can't help; testing for these cases will
5047 prevent infinite recursion here and speed things up.
5049 We can only do this for EQ and NE comparisons as otherwise we may
5050 lose or introduce overflow, which we cannot disregard as undefined because
5051 we do not know the signedness of the operation on either the left or
5052 the right hand side of the comparison. */
5054 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5055 && (code == EQ || code == NE)
5056 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5057 && (REG_P (op1) || CONST_INT_P (trueop1)))
5058 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5059 /* We cannot do this if tem is a nonzero address. */
5060 && ! nonzero_address_p (tem))
5061 return simplify_const_relational_operation (signed_condition (code),
5062 mode, tem, const0_rtx);
5064 if (! HONOR_NANS (mode) && code == ORDERED)
5065 return const_true_rtx;
5067 if (! HONOR_NANS (mode) && code == UNORDERED)
5068 return const0_rtx;
5070 /* For modes without NaNs, if the two operands are equal, we know the
5071 result except if they have side-effects. Even with NaNs we know
5072 the result of unordered comparisons and, if signaling NaNs are
5073 irrelevant, also the result of LT/GT/LTGT. */
5074 if ((! HONOR_NANS (trueop0)
5075 || code == UNEQ || code == UNLE || code == UNGE
5076 || ((code == LT || code == GT || code == LTGT)
5077 && ! HONOR_SNANS (trueop0)))
5078 && rtx_equal_p (trueop0, trueop1)
5079 && ! side_effects_p (trueop0))
5080 return comparison_result (code, CMP_EQ);
5082 /* If the operands are floating-point constants, see if we can fold
5083 the result. */
5084 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5085 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5086 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5088 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5089 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5091 /* Comparisons are unordered iff at least one of the values is NaN. */
5092 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5093 switch (code)
5095 case UNEQ:
5096 case UNLT:
5097 case UNGT:
5098 case UNLE:
5099 case UNGE:
5100 case NE:
5101 case UNORDERED:
5102 return const_true_rtx;
5103 case EQ:
5104 case LT:
5105 case GT:
5106 case LE:
5107 case GE:
5108 case LTGT:
5109 case ORDERED:
5110 return const0_rtx;
5111 default:
5112 return 0;
5115 return comparison_result (code,
5116 (real_equal (d0, d1) ? CMP_EQ :
5117 real_less (d0, d1) ? CMP_LT : CMP_GT));
5120 /* Otherwise, see if the operands are both integers. */
5121 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5122 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5124 /* It would be nice if we really had a mode here. However, the
5125 largest int representable on the target is as good as
5126 infinite. */
5127 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5128 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5129 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5131 if (wi::eq_p (ptrueop0, ptrueop1))
5132 return comparison_result (code, CMP_EQ);
5133 else
5135 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5136 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5137 return comparison_result (code, cr);
5141 /* Optimize comparisons with upper and lower bounds. */
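/* E.g. if nonzero_bits proves that x fits in 8 bits, (gtu x 255)
   folds to const0_rtx and (leu x 255) to const_true_rtx. */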
5142 if (HWI_COMPUTABLE_MODE_P (mode)
5143 && CONST_INT_P (trueop1)
5144 && !side_effects_p (trueop0))
5146 int sign;
5147 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5148 HOST_WIDE_INT val = INTVAL (trueop1);
5149 HOST_WIDE_INT mmin, mmax;
5151 if (code == GEU
5152 || code == LEU
5153 || code == GTU
5154 || code == LTU)
5155 sign = 0;
5156 else
5157 sign = 1;
5159 /* Get a reduced range if the sign bit is zero. */
5160 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5162 mmin = 0;
5163 mmax = nonzero;
5165 else
5167 rtx mmin_rtx, mmax_rtx;
5168 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5170 mmin = INTVAL (mmin_rtx);
5171 mmax = INTVAL (mmax_rtx);
5172 if (sign)
5174 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5176 mmin >>= (sign_copies - 1);
5177 mmax >>= (sign_copies - 1);
5181 switch (code)
5183 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5184 case GEU:
5185 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5186 return const_true_rtx;
5187 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5188 return const0_rtx;
5189 break;
5190 case GE:
5191 if (val <= mmin)
5192 return const_true_rtx;
5193 if (val > mmax)
5194 return const0_rtx;
5195 break;
5197 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5198 case LEU:
5199 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5200 return const_true_rtx;
5201 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5202 return const0_rtx;
5203 break;
5204 case LE:
5205 if (val >= mmax)
5206 return const_true_rtx;
5207 if (val < mmin)
5208 return const0_rtx;
5209 break;
5211 case EQ:
5212 /* x == y is always false for y out of range. */
5213 if (val < mmin || val > mmax)
5214 return const0_rtx;
5215 break;
5217 /* x > y is always false for y >= mmax, always true for y < mmin. */
5218 case GTU:
5219 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5220 return const0_rtx;
5221 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5222 return const_true_rtx;
5223 break;
5224 case GT:
5225 if (val >= mmax)
5226 return const0_rtx;
5227 if (val < mmin)
5228 return const_true_rtx;
5229 break;
5231 /* x < y is always false for y <= mmin, always true for y > mmax. */
5232 case LTU:
5233 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5234 return const0_rtx;
5235 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5236 return const_true_rtx;
5237 break;
5238 case LT:
5239 if (val <= mmin)
5240 return const0_rtx;
5241 if (val > mmax)
5242 return const_true_rtx;
5243 break;
5245 case NE:
5246 /* x != y is always true for y out of range. */
5247 if (val < mmin || val > mmax)
5248 return const_true_rtx;
5249 break;
5251 default:
5252 break;
5256 /* Optimize integer comparisons with zero. */
5257 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5259 /* Some addresses are known to be nonzero. We don't know
5260 their sign, but equality comparisons are known. */
5261 if (nonzero_address_p (trueop0))
5263 if (code == EQ || code == LEU)
5264 return const0_rtx;
5265 if (code == NE || code == GTU)
5266 return const_true_rtx;
5269 /* See if the first operand is an IOR with a constant. If so, we
5270 may be able to determine the result of this comparison. */
5271 if (GET_CODE (op0) == IOR)
5273 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5274 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5276 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5277 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5278 && (UINTVAL (inner_const)
5279 & (HOST_WIDE_INT_1U
5280 << sign_bitnum)));
5282 switch (code)
5284 case EQ:
5285 case LEU:
5286 return const0_rtx;
5287 case NE:
5288 case GTU:
5289 return const_true_rtx;
5290 case LT:
5291 case LE:
5292 if (has_sign)
5293 return const_true_rtx;
5294 break;
5295 case GT:
5296 case GE:
5297 if (has_sign)
5298 return const0_rtx;
5299 break;
5300 default:
5301 break;
5307 /* Optimize comparison of ABS with zero. */
5308 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5309 && (GET_CODE (trueop0) == ABS
5310 || (GET_CODE (trueop0) == FLOAT_EXTEND
5311 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5313 switch (code)
5315 case LT:
5316 /* Optimize abs(x) < 0.0. */
5317 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5318 return const0_rtx;
5319 break;
5321 case GE:
5322 /* Optimize abs(x) >= 0.0. */
5323 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5324 return const_true_rtx;
5325 break;
5327 case UNGE:
5328 /* Optimize ! (abs(x) < 0.0). */
5329 return const_true_rtx;
5331 default:
5332 break;
5336 return 0;
5339 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5340 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5341 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5342 expression can be simplified to that, or NULL_RTX if not.
5343 Assume X is compared against zero with CMP_CODE and the true
5344 arm is TRUE_VAL and the false arm is FALSE_VAL. */
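/* E.g. on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, (if_then_else (eq x 0) (const_int 32) (clz x)) can be
   replaced by (clz x) alone. */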
5346 static rtx
5347 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5349 if (cmp_code != EQ && cmp_code != NE)
5350 return NULL_RTX;
5352 /* Result for X == 0 and X != 0 respectively. */
5353 rtx on_zero, on_nonzero;
5354 if (cmp_code == EQ)
5356 on_zero = true_val;
5357 on_nonzero = false_val;
5359 else
5361 on_zero = false_val;
5362 on_nonzero = true_val;
5365 rtx_code op_code = GET_CODE (on_nonzero);
5366 if ((op_code != CLZ && op_code != CTZ)
5367 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5368 || !CONST_INT_P (on_zero))
5369 return NULL_RTX;
5371 HOST_WIDE_INT op_val;
5372 if (((op_code == CLZ
5373 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5374 || (op_code == CTZ
5375 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5376 && op_val == INTVAL (on_zero))
5377 return on_nonzero;
5379 return NULL_RTX;
5383 /* Simplify CODE, an operation with result mode MODE and three operands,
5384 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5385 a constant. Return 0 if no simplification is possible. */
5388 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5389 machine_mode op0_mode, rtx op0, rtx op1,
5390 rtx op2)
5392 unsigned int width = GET_MODE_PRECISION (mode);
5393 bool any_change = false;
5394 rtx tem, trueop2;
5396 /* VOIDmode means "infinite" precision. */
5397 if (width == 0)
5398 width = HOST_BITS_PER_WIDE_INT;
5400 switch (code)
5402 case FMA:
5403 /* Simplify negations around the multiplication. */
5404 /* -a * -b + c => a * b + c. */
5405 if (GET_CODE (op0) == NEG)
5407 tem = simplify_unary_operation (NEG, mode, op1, mode);
5408 if (tem)
5409 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5411 else if (GET_CODE (op1) == NEG)
5413 tem = simplify_unary_operation (NEG, mode, op0, mode);
5414 if (tem)
5415 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5418 /* Canonicalize the two multiplication operands. */
5419 /* a * -b + c => -b * a + c. */
5420 if (swap_commutative_operands_p (op0, op1))
5421 std::swap (op0, op1), any_change = true;
5423 if (any_change)
5424 return gen_rtx_FMA (mode, op0, op1, op2);
5425 return NULL_RTX;
5427 case SIGN_EXTRACT:
5428 case ZERO_EXTRACT:
5429 if (CONST_INT_P (op0)
5430 && CONST_INT_P (op1)
5431 && CONST_INT_P (op2)
5432 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5433 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5435 /* Extracting a bit-field from a constant. */
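/* E.g. (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
   evaluates to (const_int 0xa) when BITS_BIG_ENDIAN is zero. */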
5436 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5437 HOST_WIDE_INT op1val = INTVAL (op1);
5438 HOST_WIDE_INT op2val = INTVAL (op2);
5439 if (BITS_BIG_ENDIAN)
5440 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5441 else
5442 val >>= op2val;
5444 if (HOST_BITS_PER_WIDE_INT != op1val)
5446 /* First zero-extend. */
5447 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5448 /* If desired, propagate sign bit. */
5449 if (code == SIGN_EXTRACT
5450 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5451 != 0)
5452 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5455 return gen_int_mode (val, mode);
5457 break;
5459 case IF_THEN_ELSE:
5460 if (CONST_INT_P (op0))
5461 return op0 != const0_rtx ? op1 : op2;
5463 /* Convert c ? a : a into "a". */
5464 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5465 return op1;
5467 /* Convert a != b ? a : b into "a". */
5468 if (GET_CODE (op0) == NE
5469 && ! side_effects_p (op0)
5470 && ! HONOR_NANS (mode)
5471 && ! HONOR_SIGNED_ZEROS (mode)
5472 && ((rtx_equal_p (XEXP (op0, 0), op1)
5473 && rtx_equal_p (XEXP (op0, 1), op2))
5474 || (rtx_equal_p (XEXP (op0, 0), op2)
5475 && rtx_equal_p (XEXP (op0, 1), op1))))
5476 return op1;
5478 /* Convert a == b ? a : b into "b". */
5479 if (GET_CODE (op0) == EQ
5480 && ! side_effects_p (op0)
5481 && ! HONOR_NANS (mode)
5482 && ! HONOR_SIGNED_ZEROS (mode)
5483 && ((rtx_equal_p (XEXP (op0, 0), op1)
5484 && rtx_equal_p (XEXP (op0, 1), op2))
5485 || (rtx_equal_p (XEXP (op0, 0), op2)
5486 && rtx_equal_p (XEXP (op0, 1), op1))))
5487 return op2;
5489 /* Convert (!c) != {0,...,0} ? a : b into
5490 c != {0,...,0} ? b : a for vector modes. */
5491 if (VECTOR_MODE_P (GET_MODE (op1))
5492 && GET_CODE (op0) == NE
5493 && GET_CODE (XEXP (op0, 0)) == NOT
5494 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5496 rtx cv = XEXP (op0, 1);
5497 int nunits = CONST_VECTOR_NUNITS (cv);
5498 bool ok = true;
5499 for (int i = 0; i < nunits; ++i)
5500 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5502 ok = false;
5503 break;
5505 if (ok)
5507 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5508 XEXP (XEXP (op0, 0), 0),
5509 XEXP (op0, 1));
5510 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5511 return retval;
5515 /* Convert x == 0 ? N : clz (x) into clz (x) when
5516 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5517 Similarly for ctz (x). */
5518 if (COMPARISON_P (op0) && !side_effects_p (op0)
5519 && XEXP (op0, 1) == const0_rtx)
5521 rtx simplified
5522 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5523 op1, op2);
5524 if (simplified)
5525 return simplified;
5528 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5530 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5531 ? GET_MODE (XEXP (op0, 1))
5532 : GET_MODE (XEXP (op0, 0)));
5533 rtx temp;
5535 /* Look for happy constants in op1 and op2. */
5536 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5538 HOST_WIDE_INT t = INTVAL (op1);
5539 HOST_WIDE_INT f = INTVAL (op2);
5541 if (t == STORE_FLAG_VALUE && f == 0)
5542 code = GET_CODE (op0);
5543 else if (t == 0 && f == STORE_FLAG_VALUE)
5545 enum rtx_code tmp;
5546 tmp = reversed_comparison_code (op0, NULL);
5547 if (tmp == UNKNOWN)
5548 break;
5549 code = tmp;
5551 else
5552 break;
5554 return simplify_gen_relational (code, mode, cmp_mode,
5555 XEXP (op0, 0), XEXP (op0, 1));
5558 if (cmp_mode == VOIDmode)
5559 cmp_mode = op0_mode;
5560 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5561 cmp_mode, XEXP (op0, 0),
5562 XEXP (op0, 1));
5564 /* See if any simplifications were possible. */
5565 if (temp)
5567 if (CONST_INT_P (temp))
5568 return temp == const0_rtx ? op2 : op1;
5569 else
5570 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5573 break;
5575 case VEC_MERGE:
5576 gcc_assert (GET_MODE (op0) == mode);
5577 gcc_assert (GET_MODE (op1) == mode);
5578 gcc_assert (VECTOR_MODE_P (mode));
5579 trueop2 = avoid_constant_pool_reference (op2);
5580 if (CONST_INT_P (trueop2))
5582 int elt_size = GET_MODE_UNIT_SIZE (mode);
5583 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5584 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5585 unsigned HOST_WIDE_INT mask;
5586 if (n_elts == HOST_BITS_PER_WIDE_INT)
5587 mask = -1;
5588 else
5589 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
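/* A set bit in the selector takes the corresponding element from
   op0, a clear bit takes it from op1. */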
5591 if (!(sel & mask) && !side_effects_p (op0))
5592 return op1;
5593 if ((sel & mask) == mask && !side_effects_p (op1))
5594 return op0;
5596 rtx trueop0 = avoid_constant_pool_reference (op0);
5597 rtx trueop1 = avoid_constant_pool_reference (op1);
5598 if (GET_CODE (trueop0) == CONST_VECTOR
5599 && GET_CODE (trueop1) == CONST_VECTOR)
5601 rtvec v = rtvec_alloc (n_elts);
5602 unsigned int i;
5604 for (i = 0; i < n_elts; i++)
5605 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5606 ? CONST_VECTOR_ELT (trueop0, i)
5607 : CONST_VECTOR_ELT (trueop1, i));
5608 return gen_rtx_CONST_VECTOR (mode, v);
5611 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5612 if no element from a appears in the result. */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }

          /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
             with a.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
              && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
              && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
            {
              tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
              if (CONST_INT_P (tem) && CONST_INT_P (op2))
                {
                  if (XEXP (XEXP (op0, 0), 0) == op1
                      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
                    return op1;
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
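/* For illustration (assuming a little-endian target): taking the low
   QImode byte of the HImode constant 0x1234 unpacks it into the byte
   array { 0x34, 0x12 }, selects the sub-array starting at BYTE == 0,
   and repacks that single byte as the QImode constant 0x34.  */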
static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
                       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
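      /* BYTE is the element's byte offset counted from element 0, IBYTE
         the offset counted from the last element; WORDS_BIG_ENDIAN and
         BYTES_BIG_ENDIAN decide which of the two places the element
         within the little-endian VALUE array.  */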
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;
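
        /* CONST_WIDE_INTs are likewise logically sign-extended from
           their precision out to the element size.  */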
        case CONST_WIDE_INT:
          {
            rtx_mode_t val = rtx_mode_t (el, innermode);
            unsigned char extend = wi::sign_mask (val);
            int prec = wi::get_precision (val);

            for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
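  /* E.g. on a big-endian target with 8-byte words, the SImode subreg of
     a DImode value at BYTE == 0 names the high part, which sits 4 bytes
     away from the least significant byte, so BYTE is renumbered to 4.  */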
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
              return NULL_RTX;
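            /* Gather the value_bit-sized chunks back into
               HOST_WIDE_INT-sized words, least significant word first.  */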
            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
            /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
            if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
              return NULL_RTX;
#endif
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = const_double_from_real_value (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE (OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
                 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0; on big-endian machines this value
         should really be negative.  For a moment, undo this exception.  */
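      /* For instance (assuming a big-endian target with 4-byte words):
         a paradoxical DImode subreg at byte 0 of an SImode value really
         starts 4 bytes before the SImode value, i.e. at offset -4; the
         adjustments below reconstruct that negative offset.  */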
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
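  /* E.g. (subreg:SI (zero_extend:DI (reg:SI x)) 4) is zero on a
     little-endian target, because only the low 32 bits of the DImode
     value can be nonzero.  */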
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
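
  /* A lowpart SUBREG to a narrower scalar integer mode acts like
     TRUNCATE; give simplify_truncation a chance at it.  */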
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */
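/* Unlike simplify_subreg, this falls back to generating a plain SUBREG
   when no simplification applies but the subreg would still be valid.  */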
rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
                     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generate a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) in OUTER_MODE.  */
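/* E.g. lowpart_subreg (QImode, X, SImode) yields (subreg:QI X 0) on a
   little-endian target and (subreg:QI X 3) on a typical big-endian one,
   unless it simplifies further.  */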
rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
                machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
                              subreg_lowpart_offset (outer_mode, inner_mode));
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}