gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
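/* Illustrative example (hypothetical values, assuming a 64-bit
   HOST_WIDE_INT): HWI_SIGN_EXTEND (5) yields HOST_WIDE_INT_0, while
   HWI_SIGN_EXTEND (-2) yields HOST_WIDE_INT_M1; the high half of the
   pair is all ones exactly when the low half is negative viewed as a
   signed wide int.  */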
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 #if TARGET_SUPPORTS_WIDE_INT
92 else if (CONST_WIDE_INT_P (x))
94 unsigned int i;
95 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
96 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
97 return false;
98 for (i = 0; i < elts - 1; i++)
99 if (CONST_WIDE_INT_ELT (x, i) != 0)
100 return false;
101 val = CONST_WIDE_INT_ELT (x, elts - 1);
102 width %= HOST_BITS_PER_WIDE_INT;
103 if (width == 0)
104 width = HOST_BITS_PER_WIDE_INT;
106 #else
107 else if (width <= HOST_BITS_PER_DOUBLE_INT
108 && CONST_DOUBLE_AS_INT_P (x)
109 && CONST_DOUBLE_LOW (x) == 0)
111 val = CONST_DOUBLE_HIGH (x);
112 width -= HOST_BITS_PER_WIDE_INT;
114 #endif
115 else
116 /* X is not an integer constant. */
117 return false;
119 if (width < HOST_BITS_PER_WIDE_INT)
120 val &= (HOST_WIDE_INT_1U << width) - 1;
121 return val == (HOST_WIDE_INT_1U << (width - 1));
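/* For illustration (hypothetical operands): in QImode the only value
   accepted is 0x80, and in SImode only 0x80000000, i.e. a constant
   whose mode-masked value has the sign bit set and every other bit
   clear.  */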
124 /* Test whether VAL is equal to the most significant bit of mode MODE
125 (after masking with the mode mask of MODE). Returns false if the
126 precision of MODE is too large to handle. */
128 bool
129 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 unsigned int width;
133 if (GET_MODE_CLASS (mode) != MODE_INT)
134 return false;
136 width = GET_MODE_PRECISION (mode);
137 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138 return false;
140 val &= GET_MODE_MASK (mode);
141 return val == (HOST_WIDE_INT_1U << (width - 1));
144 /* Test whether the most significant bit of mode MODE is set in VAL.
145 Returns false if the precision of MODE is too large to handle. */
146 bool
147 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
149 unsigned int width;
151 if (GET_MODE_CLASS (mode) != MODE_INT)
152 return false;
154 width = GET_MODE_PRECISION (mode);
155 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156 return false;
158 val &= HOST_WIDE_INT_1U << (width - 1);
159 return val != 0;
162 /* Test whether the most significant bit of mode MODE is clear in VAL.
163 Returns false if the precision of MODE is too large to handle. */
164 bool
165 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
167 unsigned int width;
169 if (GET_MODE_CLASS (mode) != MODE_INT)
170 return false;
172 width = GET_MODE_PRECISION (mode);
173 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
174 return false;
176 val &= HOST_WIDE_INT_1U << (width - 1);
177 return val == 0;
180 /* Make a binary operation by properly ordering the operands and
181 seeing if the expression folds. */
183 rtx
184 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
185 rtx op1)
187 rtx tem;
189 /* If this simplifies, do it. */
190 tem = simplify_binary_operation (code, mode, op0, op1);
191 if (tem)
192 return tem;
194 /* Put complex operands first and constants second if commutative. */
195 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
196 && swap_commutative_operands_p (op0, op1))
197 std::swap (op0, op1);
199 return gen_rtx_fmt_ee (code, mode, op0, op1);
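/* A sketch of the two behaviours above, with hypothetical operands:
   simplify_gen_binary (PLUS, SImode, (reg:SI r), (const_int 0))
   folds to (reg:SI r), while
   simplify_gen_binary (PLUS, SImode, (const_int 4), (reg:SI r))
   does not fold but is canonicalized to
   (plus:SI (reg:SI r) (const_int 4)).  */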
202 /* If X is a MEM referencing the constant pool, return the real value.
203 Otherwise return X. */
204 rtx
205 avoid_constant_pool_reference (rtx x)
207 rtx c, tmp, addr;
208 machine_mode cmode;
209 HOST_WIDE_INT offset = 0;
211 switch (GET_CODE (x))
213 case MEM:
214 break;
216 case FLOAT_EXTEND:
217 /* Handle float extensions of constant pool references. */
218 tmp = XEXP (x, 0);
219 c = avoid_constant_pool_reference (tmp);
220 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
222 GET_MODE (x));
223 return x;
225 default:
226 return x;
229 if (GET_MODE (x) == BLKmode)
230 return x;
232 addr = XEXP (x, 0);
234 /* Call target hook to avoid the effects of -fpic etc.... */
235 addr = targetm.delegitimize_address (addr);
237 /* Split the address into a base and integer offset. */
238 if (GET_CODE (addr) == CONST
239 && GET_CODE (XEXP (addr, 0)) == PLUS
240 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
242 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
243 addr = XEXP (XEXP (addr, 0), 0);
246 if (GET_CODE (addr) == LO_SUM)
247 addr = XEXP (addr, 1);
249 /* If this is a constant pool reference, we can turn it into its
250 constant and hope that simplifications happen. */
251 if (GET_CODE (addr) == SYMBOL_REF
252 && CONSTANT_POOL_ADDRESS_P (addr))
254 c = get_pool_constant (addr);
255 cmode = get_pool_mode (addr);
257 /* If we're accessing the constant in a different mode than it was
258 originally stored, attempt to fix that up via subreg simplifications.
259 If that fails we have no choice but to return the original memory. */
260 if (offset == 0 && cmode == GET_MODE (x))
261 return c;
262 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
264 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
265 if (tem && CONSTANT_P (tem))
266 return tem;
270 return x;
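/* For instance (hypothetical pool entry): if X is (mem:DF (symbol_ref c))
   where c is a constant-pool symbol whose entry is the DFmode constant
   1.0, the function returns that CONST_DOUBLE directly, provided the
   offset is zero and the modes match as checked above.  */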
273 /* Simplify a MEM based on its attributes. This is the default
274 delegitimize_address target hook, and it's recommended that every
275 overrider call it. */
277 rtx
278 delegitimize_mem_from_attrs (rtx x)
280 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
281 use their base addresses as equivalent. */
282 if (MEM_P (x)
283 && MEM_EXPR (x)
284 && MEM_OFFSET_KNOWN_P (x))
286 tree decl = MEM_EXPR (x);
287 machine_mode mode = GET_MODE (x);
288 HOST_WIDE_INT offset = 0;
290 switch (TREE_CODE (decl))
292 default:
293 decl = NULL;
294 break;
296 case VAR_DECL:
297 break;
299 case ARRAY_REF:
300 case ARRAY_RANGE_REF:
301 case COMPONENT_REF:
302 case BIT_FIELD_REF:
303 case REALPART_EXPR:
304 case IMAGPART_EXPR:
305 case VIEW_CONVERT_EXPR:
307 HOST_WIDE_INT bitsize, bitpos;
308 tree toffset;
309 int unsignedp, reversep, volatilep = 0;
311 decl
312 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
313 &unsignedp, &reversep, &volatilep);
314 if (bitsize != GET_MODE_BITSIZE (mode)
315 || (bitpos % BITS_PER_UNIT)
316 || (toffset && !tree_fits_shwi_p (toffset)))
317 decl = NULL;
318 else
320 offset += bitpos / BITS_PER_UNIT;
321 if (toffset)
322 offset += tree_to_shwi (toffset);
324 break;
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
336 rtx newx;
338 offset += MEM_OFFSET (x);
340 newx = DECL_RTL (decl);
342 if (MEM_P (newx))
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
346 /* Avoid creating a new MEM needlessly if we already had
347 the same address. We do if there's no OFFSET and the
348 old address X is identical to NEWX, or if X is of the
349 form (plus NEWX OFFSET), or the NEWX is of the form
350 (plus Y (const_int Z)) and X is that with the offset
351 added: (plus Y (const_int Z+OFFSET)). */
352 if (!((offset == 0
353 || (GET_CODE (o) == PLUS
354 && GET_CODE (XEXP (o, 1)) == CONST_INT
355 && (offset == INTVAL (XEXP (o, 1))
356 || (GET_CODE (n) == PLUS
357 && GET_CODE (XEXP (n, 1)) == CONST_INT
358 && (INTVAL (XEXP (n, 1)) + offset
359 == INTVAL (XEXP (o, 1)))
360 && (n = XEXP (n, 0))))
361 && (o = XEXP (o, 0))))
362 && rtx_equal_p (o, n)))
363 x = adjust_address_nv (newx, mode, offset);
365 else if (GET_MODE (x) == GET_MODE (newx)
366 && offset == 0)
367 x = newx;
371 return x;
374 /* Make a unary operation by first seeing if it folds and otherwise making
375 the specified operation. */
377 rtx
378 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
379 machine_mode op_mode)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
385 return tem;
387 return gen_rtx_fmt_e (code, mode, op);
390 /* Likewise for ternary operations. */
392 rtx
393 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
394 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
396 rtx tem;
398 /* If this simplifies, use it. */
399 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
400 op0, op1, op2)))
401 return tem;
403 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
406 /* Likewise, for relational operations.
407 CMP_MODE specifies mode comparison is done in. */
409 rtx
410 simplify_gen_relational (enum rtx_code code, machine_mode mode,
411 machine_mode cmp_mode, rtx op0, rtx op1)
413 rtx tem;
415 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
416 op0, op1)))
417 return tem;
419 return gen_rtx_fmt_ee (code, mode, op0, op1);
422 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
423 and simplify the result. If FN is non-NULL, call this callback on each
424 X; if it returns non-NULL, replace X with its return value and simplify the
425 result. */
427 rtx
428 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
429 rtx (*fn) (rtx, const_rtx, void *), void *data)
431 enum rtx_code code = GET_CODE (x);
432 machine_mode mode = GET_MODE (x);
433 machine_mode op_mode;
434 const char *fmt;
435 rtx op0, op1, op2, newx, op;
436 rtvec vec, newvec;
437 int i, j;
439 if (__builtin_expect (fn != NULL, 0))
441 newx = fn (x, old_rtx, data);
442 if (newx)
443 return newx;
445 else if (rtx_equal_p (x, old_rtx))
446 return copy_rtx ((rtx) data);
448 switch (GET_RTX_CLASS (code))
450 case RTX_UNARY:
451 op0 = XEXP (x, 0);
452 op_mode = GET_MODE (op0);
453 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
454 if (op0 == XEXP (x, 0))
455 return x;
456 return simplify_gen_unary (code, mode, op0, op_mode);
458 case RTX_BIN_ARITH:
459 case RTX_COMM_ARITH:
460 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_binary (code, mode, op0, op1);
466 case RTX_COMPARE:
467 case RTX_COMM_COMPARE:
468 op0 = XEXP (x, 0);
469 op1 = XEXP (x, 1);
470 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
471 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_relational (code, mode, op_mode, op0, op1);
477 case RTX_TERNARY:
478 case RTX_BITFIELD_OPS:
479 op0 = XEXP (x, 0);
480 op_mode = GET_MODE (op0);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
483 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
485 return x;
486 if (op_mode == VOIDmode)
487 op_mode = GET_MODE (op0);
488 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
490 case RTX_EXTRA:
491 if (code == SUBREG)
493 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
494 if (op0 == SUBREG_REG (x))
495 return x;
496 op0 = simplify_gen_subreg (GET_MODE (x), op0,
497 GET_MODE (SUBREG_REG (x)),
498 SUBREG_BYTE (x));
499 return op0 ? op0 : x;
501 break;
503 case RTX_OBJ:
504 if (code == MEM)
506 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
507 if (op0 == XEXP (x, 0))
508 return x;
509 return replace_equiv_address_nv (x, op0);
511 else if (code == LO_SUM)
513 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
514 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
516 /* (lo_sum (high x) y) -> y where x and y have the same base. */
517 if (GET_CODE (op0) == HIGH)
519 rtx base0, base1, offset0, offset1;
520 split_const (XEXP (op0, 0), &base0, &offset0);
521 split_const (op1, &base1, &offset1);
522 if (rtx_equal_p (base0, base1))
523 return op1;
526 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
527 return x;
528 return gen_rtx_LO_SUM (mode, op0, op1);
530 break;
532 default:
533 break;
536 newx = x;
537 fmt = GET_RTX_FORMAT (code);
538 for (i = 0; fmt[i]; i++)
539 switch (fmt[i])
541 case 'E':
542 vec = XVEC (x, i);
543 newvec = XVEC (newx, i);
544 for (j = 0; j < GET_NUM_ELEM (vec); j++)
546 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
547 old_rtx, fn, data);
548 if (op != RTVEC_ELT (vec, j))
550 if (newvec == vec)
552 newvec = shallow_copy_rtvec (vec);
553 if (x == newx)
554 newx = shallow_copy_rtx (x);
555 XVEC (newx, i) = newvec;
557 RTVEC_ELT (newvec, j) = op;
560 break;
562 case 'e':
563 if (XEXP (x, i))
565 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
566 if (op != XEXP (x, i))
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XEXP (newx, i) = op;
573 break;
575 return newx;
578 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
579 resulting RTX. Return a new RTX which is as simplified as possible. */
581 rtx
582 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
584 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
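/* A small example (hypothetical operands): replacing (reg:SI a) with
   (const_int 8) in (plus:SI (reg:SI a) (const_int 4)) recurses into the
   PLUS and then folds the two constants, yielding (const_int 12).  */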
587 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
588 Only handle cases where the truncated value is inherently an rvalue.
590 RTL provides two ways of truncating a value:
592 1. a lowpart subreg. This form is only a truncation when both
593 the outer and inner modes (here MODE and OP_MODE respectively)
594 are scalar integers, and only then when the subreg is used as
595 an rvalue.
597 It is only valid to form such truncating subregs if the
598 truncation requires no action by the target. The onus for
599 proving this is on the creator of the subreg -- e.g. the
600 caller to simplify_subreg or simplify_gen_subreg -- and typically
601 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
603 2. a TRUNCATE. This form handles both scalar and compound integers.
605 The first form is preferred where valid. However, the TRUNCATE
606 handling in simplify_unary_operation turns the second form into the
607 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
608 so it is generally safe to form rvalue truncations using:
610 simplify_gen_unary (TRUNCATE, ...)
612 and leave simplify_unary_operation to work out which representation
613 should be used.
615 Because of the proof requirements on (1), simplify_truncation must
616 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
617 regardless of whether the outer truncation came from a SUBREG or a
618 TRUNCATE. For example, if the caller has proven that an SImode
619 truncation of:
621 (and:DI X Y)
623 is a no-op and can be represented as a subreg, it does not follow
624 that SImode truncations of X and Y are also no-ops. On a target
625 like 64-bit MIPS that requires SImode values to be stored in
626 sign-extended form, an SImode truncation of:
628 (and:DI (reg:DI X) (const_int 63))
630 is trivially a no-op because only the lower 6 bits can be set.
631 However, X is still an arbitrary 64-bit number and so we cannot
632 assume that truncating it too is a no-op. */
634 static rtx
635 simplify_truncation (machine_mode mode, rtx op,
636 machine_mode op_mode)
638 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
639 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
640 gcc_assert (precision <= op_precision);
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if (1
667 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
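/* Example of the distribution above (hypothetical operands, on a target
   where the WORD_REGISTER_OPERATIONS condition allows it):
   (truncate:QI (plus:SI (reg:SI x) (const_int 260)))
   becomes (plus:QI (truncate:QI (reg:SI x)) (const_int 4)),
   since 260 truncated to QImode is 4.  */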
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
682 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
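/* For example (hypothetical operands):
   (truncate:QI (ashiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 3)))
   satisfies the checks above (QImode is less than half as wide as SImode
   and 3 < 8) and becomes (ashiftrt:QI (reg:QI x) (const_int 3)).  */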
698 /* Likewise, simplify (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
699 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
710 /* Likewise, simplify (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
711 (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
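/* A worked instance of the AND case above (hypothetical operands):
   (truncate:QI (and:SI (lshiftrt:SI (reg:SI x) (const_int 2)) (const_int 31)))
   passes the mask test, since (0xff >> 2) & 31 equals (0xffffffff >> 2) & 31,
   and so becomes roughly
   (and:QI (lshiftrt:QI (truncate:QI (reg:SI x)) (const_int 2)) (const_int 31)),
   where the inner truncation may itself simplify to a lowpart subreg on
   targets where that truncation is a no-op.  */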
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && SCALAR_INT_MODE_P (op_mode)
806 && MEM_P (XEXP (op, 0))
807 && CONST_INT_P (XEXP (op, 1))
808 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
809 && INTVAL (XEXP (op, 1)) > 0
810 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
811 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
812 MEM_ADDR_SPACE (XEXP (op, 0)))
813 && ! MEM_VOLATILE_P (XEXP (op, 0))
814 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
815 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
817 int byte = subreg_lowpart_offset (mode, op_mode);
818 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
819 return adjust_address_nv (XEXP (op, 0), mode,
820 (WORDS_BIG_ENDIAN
821 ? byte - shifted_bytes
822 : byte + shifted_bytes));
825 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
826 (OP:SI foo:SI) if OP is NEG or ABS. */
827 if ((GET_CODE (op) == ABS
828 || GET_CODE (op) == NEG)
829 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
830 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
831 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
832 return simplify_gen_unary (GET_CODE (op), mode,
833 XEXP (XEXP (op, 0), 0), mode);
835 /* (truncate:A (subreg:B (truncate:C X) 0)) is
836 (truncate:A X). */
837 if (GET_CODE (op) == SUBREG
838 && SCALAR_INT_MODE_P (mode)
839 && SCALAR_INT_MODE_P (op_mode)
840 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
841 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
842 && subreg_lowpart_p (op))
844 rtx inner = XEXP (SUBREG_REG (op), 0);
845 if (GET_MODE_PRECISION (mode)
846 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
847 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
848 else
849 /* If subreg above is paradoxical and C is narrower
850 than A, return (subreg:A (truncate:C X) 0). */
851 return simplify_gen_subreg (mode, SUBREG_REG (op),
852 GET_MODE (SUBREG_REG (op)), 0);
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
860 return NULL_RTX;
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
866 rtx
867 simplify_unary_operation (enum rtx_code code, machine_mode mode,
868 rtx op, machine_mode op_mode)
870 rtx trueop, tem;
872 trueop = avoid_constant_pool_reference (op);
874 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
875 if (tem)
876 return tem;
878 return simplify_unary_operation_1 (code, mode, op);
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
882 to be exact. */
884 static bool
885 exact_int_to_float_conversion_p (const_rtx op)
887 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
888 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode != VOIDmode);
891 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
892 int in_bits = in_prec;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode))
895 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
896 if (GET_CODE (op) == FLOAT)
897 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
898 else if (GET_CODE (op) == UNSIGNED_FLOAT)
899 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
900 else
901 gcc_unreachable ();
902 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
904 return in_bits <= out_bits;
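/* Rough intuition, with hypothetical modes on a typical host:
   (float:DF (reg:SI x)) is always exact, because at most 31 significant
   bits must fit into DFmode's 53-bit significand, whereas
   (float:SF (reg:SI x)) is only known exact when nonzero_bits or
   num_sign_bit_copies shrink the operand to SFmode's 24 significand
   bits or fewer.  */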
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
912 enum rtx_code reversed;
913 rtx temp;
915 switch (code)
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935 /* Similarly, (not (neg X)) is (plus X -1). */
936 if (GET_CODE (op) == NEG)
937 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
938 CONSTM1_RTX (mode));
940 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
941 if (GET_CODE (op) == XOR
942 && CONST_INT_P (XEXP (op, 1))
943 && (temp = simplify_unary_operation (NOT, mode,
944 XEXP (op, 1), mode)) != 0)
945 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
947 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
948 if (GET_CODE (op) == PLUS
949 && CONST_INT_P (XEXP (op, 1))
950 && mode_signbit_p (mode, XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
956 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
957 operands other than 1, but that is not valid. We could do a
958 similar simplification for (not (lshiftrt C X)) where C is
959 just the sign bit, but this doesn't seem common enough to
960 bother with. */
961 if (GET_CODE (op) == ASHIFT
962 && XEXP (op, 0) == const1_rtx)
964 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
965 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
968 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
969 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
970 so we can perform the above simplification. */
971 if (STORE_FLAG_VALUE == -1
972 && GET_CODE (op) == ASHIFTRT
973 && CONST_INT_P (XEXP (op, 1))
974 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
975 return simplify_gen_relational (GE, mode, VOIDmode,
976 XEXP (op, 0), const0_rtx);
979 if (GET_CODE (op) == SUBREG
980 && subreg_lowpart_p (op)
981 && (GET_MODE_SIZE (GET_MODE (op))
982 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
983 && GET_CODE (SUBREG_REG (op)) == ASHIFT
984 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
986 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
987 rtx x;
989 x = gen_rtx_ROTATE (inner_mode,
990 simplify_gen_unary (NOT, inner_mode, const1_rtx,
991 inner_mode),
992 XEXP (SUBREG_REG (op), 1));
993 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
994 if (temp)
995 return temp;
998 /* Apply De Morgan's laws to reduce number of patterns for machines
999 with negating logical insns (and-not, nand, etc.). If result has
1000 only one NOT, put it first, since that is how the patterns are
1001 coded. */
1002 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1004 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1005 machine_mode op_mode;
1007 op_mode = GET_MODE (in1);
1008 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1010 op_mode = GET_MODE (in2);
1011 if (op_mode == VOIDmode)
1012 op_mode = mode;
1013 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1015 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1016 std::swap (in1, in2);
1018 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1019 mode, in1, in2);
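/* For example (hypothetical operands):
   (not:SI (and:SI (reg:SI a) (reg:SI b)))
   becomes (ior:SI (not:SI (reg:SI a)) (not:SI (reg:SI b))), and
   (not:SI (ior:SI (reg:SI a) (reg:SI b)))
   becomes (and:SI (not:SI (reg:SI a)) (not:SI (reg:SI b))).  */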
1022 /* (not (bswap x)) -> (bswap (not x)). */
1023 if (GET_CODE (op) == BSWAP)
1025 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1026 return simplify_gen_unary (BSWAP, mode, x, mode);
1028 break;
1030 case NEG:
1031 /* (neg (neg X)) == X. */
1032 if (GET_CODE (op) == NEG)
1033 return XEXP (op, 0);
1035 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1036 If the comparison is not reversible, use
1037 x ? y : (neg y). */
1038 if (GET_CODE (op) == IF_THEN_ELSE)
1040 rtx cond = XEXP (op, 0);
1041 rtx true_rtx = XEXP (op, 1);
1042 rtx false_rtx = XEXP (op, 2);
1044 if ((GET_CODE (true_rtx) == NEG
1045 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1046 || (GET_CODE (false_rtx) == NEG
1047 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1049 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1050 temp = reversed_comparison (cond, mode);
1051 else
1053 temp = cond;
1054 std::swap (true_rtx, false_rtx);
1056 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1057 mode, temp, true_rtx, false_rtx);
1061 /* (neg (plus X 1)) can become (not X). */
1062 if (GET_CODE (op) == PLUS
1063 && XEXP (op, 1) == const1_rtx)
1064 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1066 /* Similarly, (neg (not X)) is (plus X 1). */
1067 if (GET_CODE (op) == NOT)
1068 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1069 CONST1_RTX (mode));
1071 /* (neg (minus X Y)) can become (minus Y X). This transformation
1072 isn't safe for modes with signed zeros, since if X and Y are
1073 both +0, (minus Y X) is the same as (minus X Y). If the
1074 rounding mode is towards +infinity (or -infinity) then the two
1075 expressions will be rounded differently. */
1076 if (GET_CODE (op) == MINUS
1077 && !HONOR_SIGNED_ZEROS (mode)
1078 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1079 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1081 if (GET_CODE (op) == PLUS
1082 && !HONOR_SIGNED_ZEROS (mode)
1083 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1085 /* (neg (plus A C)) is simplified to (minus -C A). */
1086 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1087 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1089 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1090 if (temp)
1091 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1094 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1095 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1096 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1099 /* (neg (mult A B)) becomes (mult A (neg B)).
1100 This works even for floating-point values. */
1101 if (GET_CODE (op) == MULT
1102 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1105 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1108 /* NEG commutes with ASHIFT since it is multiplication. Only do
1109 this if we can then eliminate the NEG (e.g., if the operand
1110 is a constant). */
1111 if (GET_CODE (op) == ASHIFT)
1113 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1114 if (temp)
1115 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1118 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1119 C is equal to the width of MODE minus 1. */
1120 if (GET_CODE (op) == ASHIFTRT
1121 && CONST_INT_P (XEXP (op, 1))
1122 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1123 return simplify_gen_binary (LSHIFTRT, mode,
1124 XEXP (op, 0), XEXP (op, 1));
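/* For instance (hypothetical operand): with SImode's precision of 32,
   (neg:SI (ashiftrt:SI (reg:SI x) (const_int 31)))
   becomes (lshiftrt:SI (reg:SI x) (const_int 31)); both forms yield 1
   when x is negative and 0 otherwise.  */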
1126 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1127 C is equal to the width of MODE minus 1. */
1128 if (GET_CODE (op) == LSHIFTRT
1129 && CONST_INT_P (XEXP (op, 1))
1130 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1131 return simplify_gen_binary (ASHIFTRT, mode,
1132 XEXP (op, 0), XEXP (op, 1));
1134 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1135 if (GET_CODE (op) == XOR
1136 && XEXP (op, 1) == const1_rtx
1137 && nonzero_bits (XEXP (op, 0), mode) == 1)
1138 return plus_constant (mode, XEXP (op, 0), -1);
1140 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1141 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1142 if (GET_CODE (op) == LT
1143 && XEXP (op, 1) == const0_rtx
1144 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1146 machine_mode inner = GET_MODE (XEXP (op, 0));
1147 int isize = GET_MODE_PRECISION (inner);
1148 if (STORE_FLAG_VALUE == 1)
1150 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1151 GEN_INT (isize - 1));
1152 if (mode == inner)
1153 return temp;
1154 if (GET_MODE_PRECISION (mode) > isize)
1155 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1156 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1158 else if (STORE_FLAG_VALUE == -1)
1160 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1161 GEN_INT (isize - 1));
1162 if (mode == inner)
1163 return temp;
1164 if (GET_MODE_PRECISION (mode) > isize)
1165 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1166 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1169 break;
1171 case TRUNCATE:
1172 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1173 with the umulXi3_highpart patterns. */
1174 if (GET_CODE (op) == LSHIFTRT
1175 && GET_CODE (XEXP (op, 0)) == MULT)
1176 break;
1178 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1180 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1186 /* We can't handle truncation to a partial integer mode here
1187 because we don't know the real bitsize of the partial
1188 integer mode. */
1189 break;
1192 if (GET_MODE (op) != VOIDmode)
1194 temp = simplify_truncation (mode, op, GET_MODE (op));
1195 if (temp)
1196 return temp;
1199 /* If we know that the value is already truncated, we can
1200 replace the TRUNCATE with a SUBREG. */
1201 if (GET_MODE_NUNITS (mode) == 1
1202 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1203 || truncated_to_mode (mode, op)))
1205 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1206 if (temp)
1207 return temp;
1210 /* A truncate of a comparison can be replaced with a subreg if
1211 STORE_FLAG_VALUE permits. This is like the previous test,
1212 but it works even if the comparison is done in a mode larger
1213 than HOST_BITS_PER_WIDE_INT. */
1214 if (HWI_COMPUTABLE_MODE_P (mode)
1215 && COMPARISON_P (op)
1216 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1218 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1219 if (temp)
1220 return temp;
1223 /* A truncate of a memory is just loading the low part of the memory
1224 if we are not changing the meaning of the address. */
1225 if (GET_CODE (op) == MEM
1226 && !VECTOR_MODE_P (mode)
1227 && !MEM_VOLATILE_P (op)
1228 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1235 break;
1237 case FLOAT_TRUNCATE:
1238 if (DECIMAL_FLOAT_MODE_P (mode))
1239 break;
1241 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1242 if (GET_CODE (op) == FLOAT_EXTEND
1243 && GET_MODE (XEXP (op, 0)) == mode)
1244 return XEXP (op, 0);
1246 /* (float_truncate:SF (float_truncate:DF foo:XF))
1247 = (float_truncate:SF foo:XF).
1248 This may eliminate double rounding, so it is unsafe.
1250 (float_truncate:SF (float_extend:XF foo:DF))
1251 = (float_truncate:SF foo:DF).
1253 (float_truncate:DF (float_extend:XF foo:SF))
1254 = (float_extend:DF foo:SF). */
1255 if ((GET_CODE (op) == FLOAT_TRUNCATE
1256 && flag_unsafe_math_optimizations)
1257 || GET_CODE (op) == FLOAT_EXTEND)
1258 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1259 0)))
1260 > GET_MODE_SIZE (mode)
1261 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1262 mode,
1263 XEXP (op, 0), mode);
1265 /* (float_truncate (float x)) is (float x) */
1266 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1267 && (flag_unsafe_math_optimizations
1268 || exact_int_to_float_conversion_p (op)))
1269 return simplify_gen_unary (GET_CODE (op), mode,
1270 XEXP (op, 0),
1271 GET_MODE (XEXP (op, 0)));
1273 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1274 (OP:SF foo:SF) if OP is NEG or ABS. */
1275 if ((GET_CODE (op) == ABS
1276 || GET_CODE (op) == NEG)
1277 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1278 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1279 return simplify_gen_unary (GET_CODE (op), mode,
1280 XEXP (XEXP (op, 0), 0), mode);
1282 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1283 is (float_truncate:SF x). */
1284 if (GET_CODE (op) == SUBREG
1285 && subreg_lowpart_p (op)
1286 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1287 return SUBREG_REG (op);
1288 break;
1290 case FLOAT_EXTEND:
1291 if (DECIMAL_FLOAT_MODE_P (mode))
1292 break;
1294 /* (float_extend (float_extend x)) is (float_extend x)
1296 (float_extend (float x)) is (float x) assuming that double
1297 rounding can't happen. */
1299 if (GET_CODE (op) == FLOAT_EXTEND
1300 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1301 && exact_int_to_float_conversion_p (op)))
1302 return simplify_gen_unary (GET_CODE (op), mode,
1303 XEXP (op, 0),
1304 GET_MODE (XEXP (op, 0)));
1306 break;
1308 case ABS:
1309 /* (abs (neg <foo>)) -> (abs <foo>) */
1310 if (GET_CODE (op) == NEG)
1311 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1312 GET_MODE (XEXP (op, 0)));
1314 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1315 do nothing. */
1316 if (GET_MODE (op) == VOIDmode)
1317 break;
1319 /* If operand is something known to be positive, ignore the ABS. */
1320 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1321 || val_signbit_known_clear_p (GET_MODE (op),
1322 nonzero_bits (op, GET_MODE (op))))
1323 return op;
1325 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1326 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1327 return gen_rtx_NEG (mode, op);
1329 break;
1331 case FFS:
1332 /* (ffs (*_extend <X>)) = (ffs <X>) */
1333 if (GET_CODE (op) == SIGN_EXTEND
1334 || GET_CODE (op) == ZERO_EXTEND)
1335 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1337 break;
1339 case POPCOUNT:
1340 switch (GET_CODE (op))
1342 case BSWAP:
1343 case ZERO_EXTEND:
1344 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1345 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1346 GET_MODE (XEXP (op, 0)));
1348 case ROTATE:
1349 case ROTATERT:
1350 /* Rotations don't affect popcount. */
1351 if (!side_effects_p (XEXP (op, 1)))
1352 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1353 GET_MODE (XEXP (op, 0)));
1354 break;
1356 default:
1357 break;
1359 break;
1361 case PARITY:
1362 switch (GET_CODE (op))
1364 case NOT:
1365 case BSWAP:
1366 case ZERO_EXTEND:
1367 case SIGN_EXTEND:
1368 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1369 GET_MODE (XEXP (op, 0)));
1371 case ROTATE:
1372 case ROTATERT:
1373 /* Rotations don't affect parity. */
1374 if (!side_effects_p (XEXP (op, 1)))
1375 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1376 GET_MODE (XEXP (op, 0)));
1377 break;
1379 default:
1380 break;
1382 break;
1384 case BSWAP:
1385 /* (bswap (bswap x)) -> x. */
1386 if (GET_CODE (op) == BSWAP)
1387 return XEXP (op, 0);
1388 break;
1390 case FLOAT:
1391 /* (float (sign_extend <X>)) = (float <X>). */
1392 if (GET_CODE (op) == SIGN_EXTEND)
1393 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1394 GET_MODE (XEXP (op, 0)));
1395 break;
1397 case SIGN_EXTEND:
1398 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1399 becomes just the MINUS if its mode is MODE. This allows
1400 folding switch statements on machines using casesi (such as
1401 the VAX). */
1402 if (GET_CODE (op) == TRUNCATE
1403 && GET_MODE (XEXP (op, 0)) == mode
1404 && GET_CODE (XEXP (op, 0)) == MINUS
1405 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1406 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1407 return XEXP (op, 0);
1409 /* Extending a widening multiplication should be canonicalized to
1410 a wider widening multiplication. */
1411 if (GET_CODE (op) == MULT)
1413 rtx lhs = XEXP (op, 0);
1414 rtx rhs = XEXP (op, 1);
1415 enum rtx_code lcode = GET_CODE (lhs);
1416 enum rtx_code rcode = GET_CODE (rhs);
1418 /* Widening multiplies usually extend both operands, but sometimes
1419 they use a shift to extract a portion of a register. */
1420 if ((lcode == SIGN_EXTEND
1421 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1422 && (rcode == SIGN_EXTEND
1423 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1425 machine_mode lmode = GET_MODE (lhs);
1426 machine_mode rmode = GET_MODE (rhs);
1427 int bits;
1429 if (lcode == ASHIFTRT)
1430 /* Number of bits not shifted off the end. */
1431 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1432 else /* lcode == SIGN_EXTEND */
1433 /* Size of inner mode. */
1434 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1436 if (rcode == ASHIFTRT)
1437 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1438 else /* rcode == SIGN_EXTEND */
1439 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1441 /* We can only widen multiplies if the result is mathematically
1442 equivalent, i.e. if overflow was impossible. */
1443 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1444 return simplify_gen_binary
1445 (MULT, mode,
1446 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1447 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
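/* A sketch of the canonicalization above (hypothetical operands):
   (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
			    (sign_extend:SI (reg:HI b))))
   has bits == 16 + 16 <= 32, so overflow is impossible and it becomes
   (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))).  */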
1451 /* Check for a sign extension of a subreg of a promoted
1452 variable, where the promotion is sign-extended, and the
1453 target mode is the same as the variable's promotion. */
1454 if (GET_CODE (op) == SUBREG
1455 && SUBREG_PROMOTED_VAR_P (op)
1456 && SUBREG_PROMOTED_SIGNED_P (op)
1457 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1459 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1460 if (temp)
1461 return temp;
1464 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1465 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1466 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1468 gcc_assert (GET_MODE_PRECISION (mode)
1469 > GET_MODE_PRECISION (GET_MODE (op)));
1470 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1471 GET_MODE (XEXP (op, 0)));
1474 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1475 is (sign_extend:M (subreg:O <X>)) if there is mode with
1476 GET_MODE_BITSIZE (N) - I bits.
1477 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1478 is similarly (zero_extend:M (subreg:O <X>)). */
1479 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1480 && GET_CODE (XEXP (op, 0)) == ASHIFT
1481 && CONST_INT_P (XEXP (op, 1))
1482 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1483 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1485 machine_mode tmode
1486 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1487 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1488 gcc_assert (GET_MODE_BITSIZE (mode)
1489 > GET_MODE_BITSIZE (GET_MODE (op)));
1490 if (tmode != BLKmode)
1492 rtx inner =
1493 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1494 if (inner)
1495 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1496 ? SIGN_EXTEND : ZERO_EXTEND,
1497 mode, inner, tmode);
1501 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1502 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1503 if (GET_CODE (op) == LSHIFTRT
1504 && CONST_INT_P (XEXP (op, 1))
1505 && XEXP (op, 1) != const0_rtx)
1506 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1508 #if defined(POINTERS_EXTEND_UNSIGNED)
1509 /* As we do not know which address space the pointer is referring to,
1510 we can do this only if the target does not support different pointer
1511 or address modes depending on the address space. */
1512 if (target_default_pointer_address_modes_p ()
1513 && ! POINTERS_EXTEND_UNSIGNED
1514 && mode == Pmode && GET_MODE (op) == ptr_mode
1515 && (CONSTANT_P (op)
1516 || (GET_CODE (op) == SUBREG
1517 && REG_P (SUBREG_REG (op))
1518 && REG_POINTER (SUBREG_REG (op))
1519 && GET_MODE (SUBREG_REG (op)) == Pmode))
1520 && !targetm.have_ptr_extend ())
1522 temp
1523 = convert_memory_address_addr_space_1 (Pmode, op,
1524 ADDR_SPACE_GENERIC, false,
1525 true);
1526 if (temp)
1527 return temp;
1529 #endif
1530 break;
1532 case ZERO_EXTEND:
1533 /* Check for a zero extension of a subreg of a promoted
1534 variable, where the promotion is zero-extended, and the
1535 target mode is the same as the variable's promotion. */
1536 if (GET_CODE (op) == SUBREG
1537 && SUBREG_PROMOTED_VAR_P (op)
1538 && SUBREG_PROMOTED_UNSIGNED_P (op)
1539 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1541 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1542 if (temp)
1543 return temp;
1546 /* Extending a widening multiplication should be canonicalized to
1547 a wider widening multiplication. */
1548 if (GET_CODE (op) == MULT)
1550 rtx lhs = XEXP (op, 0);
1551 rtx rhs = XEXP (op, 1);
1552 enum rtx_code lcode = GET_CODE (lhs);
1553 enum rtx_code rcode = GET_CODE (rhs);
1555 /* Widening multiplies usually extend both operands, but sometimes
1556 they use a shift to extract a portion of a register. */
1557 if ((lcode == ZERO_EXTEND
1558 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1559 && (rcode == ZERO_EXTEND
1560 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1562 machine_mode lmode = GET_MODE (lhs);
1563 machine_mode rmode = GET_MODE (rhs);
1564 int bits;
1566 if (lcode == LSHIFTRT)
1567 /* Number of bits not shifted off the end. */
1568 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1569 else /* lcode == ZERO_EXTEND */
1570 /* Size of inner mode. */
1571 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1573 if (rcode == LSHIFTRT)
1574 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1575 else /* rcode == ZERO_EXTEND */
1576 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1578 /* We can only widen multiplies if the result is mathematically
1579 equivalent, i.e. if overflow was impossible. */
1580 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1581 return simplify_gen_binary
1582 (MULT, mode,
1583 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1584 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1588 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1589 if (GET_CODE (op) == ZERO_EXTEND)
1590 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1591 GET_MODE (XEXP (op, 0)));
1593 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1594 is (zero_extend:M (subreg:O <X>)) if there is mode with
1595 GET_MODE_PRECISION (N) - I bits. */
1596 if (GET_CODE (op) == LSHIFTRT
1597 && GET_CODE (XEXP (op, 0)) == ASHIFT
1598 && CONST_INT_P (XEXP (op, 1))
1599 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1600 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1602 machine_mode tmode
1603 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1604 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1605 if (tmode != BLKmode)
1607 rtx inner =
1608 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1609 if (inner)
1610 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1614 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1615 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1616 of mode N. E.g.
1617 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1618 (and:SI (reg:SI) (const_int 63)). */
1619 if (GET_CODE (op) == SUBREG
1620 && GET_MODE_PRECISION (GET_MODE (op))
1621 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1622 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1623 <= HOST_BITS_PER_WIDE_INT
1624 && GET_MODE_PRECISION (mode)
1625 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1626 && subreg_lowpart_p (op)
1627 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1628 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1630 if (GET_MODE_PRECISION (mode)
1631 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1632 return SUBREG_REG (op);
1633 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1634 GET_MODE (SUBREG_REG (op)));
1637 #if defined(POINTERS_EXTEND_UNSIGNED)
1638 /* As we do not know which address space the pointer is referring to,
1639 we can do this only if the target does not support different pointer
1640 or address modes depending on the address space. */
1641 if (target_default_pointer_address_modes_p ()
1642 && POINTERS_EXTEND_UNSIGNED > 0
1643 && mode == Pmode && GET_MODE (op) == ptr_mode
1644 && (CONSTANT_P (op)
1645 || (GET_CODE (op) == SUBREG
1646 && REG_P (SUBREG_REG (op))
1647 && REG_POINTER (SUBREG_REG (op))
1648 && GET_MODE (SUBREG_REG (op)) == Pmode))
1649 && !targetm.have_ptr_extend ())
1651 temp
1652 = convert_memory_address_addr_space_1 (Pmode, op,
1653 ADDR_SPACE_GENERIC, false,
1654 true);
1655 if (temp)
1656 return temp;
1658 #endif
1659 break;
1661 default:
1662 break;
1665 return 0;
1668 /* Try to compute the value of a unary operation CODE whose output mode is to
1669 be MODE with input operand OP whose mode was originally OP_MODE.
1670 Return zero if the value cannot be computed. */
1671 rtx
1672 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1673 rtx op, machine_mode op_mode)
1675 unsigned int width = GET_MODE_PRECISION (mode);
1677 if (code == VEC_DUPLICATE)
1679 gcc_assert (VECTOR_MODE_P (mode));
1680 if (GET_MODE (op) != VOIDmode)
1682 if (!VECTOR_MODE_P (GET_MODE (op)))
1683 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1684 else
1685 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1686 (GET_MODE (op)));
1688 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1689 || GET_CODE (op) == CONST_VECTOR)
1691 int elt_size = GET_MODE_UNIT_SIZE (mode);
1692 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1693 rtvec v = rtvec_alloc (n_elts);
1694 unsigned int i;
1696 if (GET_CODE (op) != CONST_VECTOR)
1697 for (i = 0; i < n_elts; i++)
1698 RTVEC_ELT (v, i) = op;
1699 else
1701 machine_mode inmode = GET_MODE (op);
1702 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1703 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1705 gcc_assert (in_n_elts < n_elts);
1706 gcc_assert ((n_elts % in_n_elts) == 0);
1707 for (i = 0; i < n_elts; i++)
1708 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1710 return gen_rtx_CONST_VECTOR (mode, v);
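/* For example (hypothetical operand): (vec_duplicate:V4SI (const_int 7))
   folds to a CONST_VECTOR whose four elements are all (const_int 7).  */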
1714 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1716 int elt_size = GET_MODE_UNIT_SIZE (mode);
1717 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1718 machine_mode opmode = GET_MODE (op);
1719 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1720 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1721 rtvec v = rtvec_alloc (n_elts);
1722 unsigned int i;
1724 gcc_assert (op_n_elts == n_elts);
1725 for (i = 0; i < n_elts; i++)
1727 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1728 CONST_VECTOR_ELT (op, i),
1729 GET_MODE_INNER (opmode));
1730 if (!x)
1731 return 0;
1732 RTVEC_ELT (v, i) = x;
1734 return gen_rtx_CONST_VECTOR (mode, v);
1737 /* The order of these tests is critical so that, for example, we don't
1738 check the wrong mode (input vs. output) for a conversion operation,
1739 such as FIX. At some point, this should be simplified. */
1741 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1743 REAL_VALUE_TYPE d;
1745 if (op_mode == VOIDmode)
1747 /* CONST_INTs have VOIDmode as their mode. We assume that all
1748 the bits of the constant are significant, though this is
1749 a dangerous assumption, as CONST_INTs are often
1750 created and used with garbage in the bits outside of the
1751 precision of the implied mode of the const_int. */
1752 op_mode = MAX_MODE_INT;
1755 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1757 /* Avoid the folding if flag_signaling_nans is on and
1758 operand is a signaling NaN. */
1759 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1760 return 0;
1762 d = real_value_truncate (mode, d);
1763 return const_double_from_real_value (d, mode);
1765 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1767 REAL_VALUE_TYPE d;
1769 if (op_mode == VOIDmode)
1771 /* CONST_INTs have VOIDmode as their mode. We assume that all
1772 the bits of the constant are significant, though this is
1773 a dangerous assumption, as CONST_INTs are often
1774 created and used with garbage in the bits outside of the
1775 precision of the implied mode of the const_int. */
1776 op_mode = MAX_MODE_INT;
1779 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1781 /* Avoid the folding if flag_signaling_nans is on and
1782 operand is a signaling NaN. */
1783 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1784 return 0;
1786 d = real_value_truncate (mode, d);
1787 return const_double_from_real_value (d, mode);
1790 if (CONST_SCALAR_INT_P (op) && width > 0)
1792 wide_int result;
1793 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1794 rtx_mode_t op0 = rtx_mode_t (op, imode);
1795 int int_value;
1797 #if TARGET_SUPPORTS_WIDE_INT == 0
1798 /* This assert keeps the simplification from producing a result
1799 that cannot be represented in a CONST_DOUBLE. A lot of
1800 upstream callers expect that this function never fails to
1801 simplify something, so if you added this check to the test
1802 above, the code would die later anyway. If this assert
1803 fires, you just need to make the port support wide int. */
1804 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1805 #endif
1807 switch (code)
1809 case NOT:
1810 result = wi::bit_not (op0);
1811 break;
1813 case NEG:
1814 result = wi::neg (op0);
1815 break;
1817 case ABS:
1818 result = wi::abs (op0);
1819 break;
1821 case FFS:
1822 result = wi::shwi (wi::ffs (op0), mode);
1823 break;
1825 case CLZ:
1826 if (wi::ne_p (op0, 0))
1827 int_value = wi::clz (op0);
1828 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1829 int_value = GET_MODE_PRECISION (mode);
1830 result = wi::shwi (int_value, mode);
1831 break;
1833 case CLRSB:
1834 result = wi::shwi (wi::clrsb (op0), mode);
1835 break;
1837 case CTZ:
1838 if (wi::ne_p (op0, 0))
1839 int_value = wi::ctz (op0);
1840 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1841 int_value = GET_MODE_PRECISION (mode);
1842 result = wi::shwi (int_value, mode);
1843 break;
1845 case POPCOUNT:
1846 result = wi::shwi (wi::popcount (op0), mode);
1847 break;
1849 case PARITY:
1850 result = wi::shwi (wi::parity (op0), mode);
1851 break;
1853 case BSWAP:
1854 result = wide_int (op0).bswap ();
1855 break;
1857 case TRUNCATE:
1858 case ZERO_EXTEND:
1859 result = wide_int::from (op0, width, UNSIGNED);
1860 break;
1862 case SIGN_EXTEND:
1863 result = wide_int::from (op0, width, SIGNED);
1864 break;
1866 case SQRT:
1867 default:
1868 return 0;
1871 return immed_wide_int_const (result, mode);
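/* For example, (neg:SI (const_int 5)) folds to (const_int -5),
   (popcount:SI (const_int 7)) to (const_int 3), and
   (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412). */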
1874 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1875 && SCALAR_FLOAT_MODE_P (mode)
1876 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1878 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1879 switch (code)
1881 case SQRT:
1882 return 0;
1883 case ABS:
1884 d = real_value_abs (&d);
1885 break;
1886 case NEG:
1887 d = real_value_negate (&d);
1888 break;
1889 case FLOAT_TRUNCATE:
1890 /* Don't perform the operation if flag_signaling_nans is on
1891 and the operand is a signaling NaN. */
1892 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1893 d = real_value_truncate (mode, d);
1894 break;
1895 case FLOAT_EXTEND:
1896 /* All this does is change the mode, unless the mode
1897 class changes. */
1898 /* Don't perform the operation if flag_signaling_nans is on
1899 and the operand is a signaling NaN. */
1900 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1901 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1902 real_convert (&d, mode, &d);
1903 break;
1904 case FIX:
1905 /* Don't perform the operation if flag_signaling_nans is on
1906 and the operand is a signaling NaN. */
1907 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1908 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1909 break;
1910 case NOT:
1912 long tmp[4];
1913 int i;
1915 real_to_target (tmp, &d, GET_MODE (op));
1916 for (i = 0; i < 4; i++)
1917 tmp[i] = ~tmp[i];
1918 real_from_target (&d, tmp, mode);
1919 break;
1921 default:
1922 gcc_unreachable ();
1924 return const_double_from_real_value (d, mode);
1926 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1927 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1928 && GET_MODE_CLASS (mode) == MODE_INT
1929 && width > 0)
1931 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1932 operators are intentionally left unspecified (to ease implementation
1933 by target backends), for consistency, this routine implements the
1934 same semantics for constant folding as used by the middle-end. */
1936 /* This was formerly used only for non-IEEE float.
1937 eggert@twinsun.com says it is safe for IEEE also. */
1938 REAL_VALUE_TYPE t;
1939 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1940 wide_int wmax, wmin;
1941 /* This is part of the ABI of real_to_integer, but we check
1942 things before making this call. */
1943 bool fail;
1945 switch (code)
1947 case FIX:
1948 if (REAL_VALUE_ISNAN (*x))
1949 return const0_rtx;
1951 /* Test against the signed upper bound. */
1952 wmax = wi::max_value (width, SIGNED);
1953 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1954 if (real_less (&t, x))
1955 return immed_wide_int_const (wmax, mode);
1957 /* Test against the signed lower bound. */
1958 wmin = wi::min_value (width, SIGNED);
1959 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1960 if (real_less (x, &t))
1961 return immed_wide_int_const (wmin, mode);
1963 return immed_wide_int_const (real_to_integer (x, &fail, width),
1964 mode);
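/* For example, in QImode (width 8) the signed bounds are -128 and 127,
   so FIX of 300.0 folds to (const_int 127), and a NaN operand folds
   to (const_int 0). */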
1966 case UNSIGNED_FIX:
1967 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1968 return const0_rtx;
1970 /* Test against the unsigned upper bound. */
1971 wmax = wi::max_value (width, UNSIGNED);
1972 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1973 if (real_less (&t, x))
1974 return immed_wide_int_const (wmax, mode);
1976 return immed_wide_int_const (real_to_integer (x, &fail, width),
1977 mode);
1979 default:
1980 gcc_unreachable ();
1984 return NULL_RTX;
1987 /* Subroutine of simplify_binary_operation to simplify a binary operation
1988 CODE that can commute with byte swapping, with result mode MODE and
1989 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1990 Return zero if no simplification or canonicalization is possible. */
1992 static rtx
1993 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1994 rtx op0, rtx op1)
1996 rtx tem;
1998 /* (op (bswap x) C1) -> (bswap (op x C2)) where C2 is C1 with its bytes swapped. */
1999 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2001 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2002 simplify_gen_unary (BSWAP, mode, op1, mode));
2003 return simplify_gen_unary (BSWAP, mode, tem, mode);
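/* For example, in SImode (and (bswap X) (const_int 0xff)) becomes
   (bswap (and X (const_int 0xff000000))). */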
2006 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2007 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2009 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2010 return simplify_gen_unary (BSWAP, mode, tem, mode);
2013 return NULL_RTX;
2016 /* Subroutine of simplify_binary_operation to simplify a commutative,
2017 associative binary operation CODE with result mode MODE, operating
2018 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2019 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2020 canonicalization is possible. */
2022 static rtx
2023 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2024 rtx op0, rtx op1)
2026 rtx tem;
2028 /* Linearize the operator to the left. */
2029 if (GET_CODE (op1) == code)
2031 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2032 if (GET_CODE (op0) == code)
2034 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2035 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
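/* For example, (plus (plus a b) (plus c d)) is rewritten as
   (plus (plus (plus a b) c) d). */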
2038 /* "a op (b op c)" becomes "(b op c) op a". */
2039 if (! swap_commutative_operands_p (op1, op0))
2040 return simplify_gen_binary (code, mode, op1, op0);
2042 std::swap (op0, op1);
2045 if (GET_CODE (op0) == code)
2047 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2048 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2050 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2051 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2054 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2055 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2056 if (tem != 0)
2057 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2059 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2060 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2061 if (tem != 0)
2062 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2065 return 0;
2069 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2070 and OP1. Return 0 if no simplification is possible.
2072 Don't use this for relational operations such as EQ or LT.
2073 Use simplify_relational_operation instead. */
2075 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2076 rtx op0, rtx op1)
2078 rtx trueop0, trueop1;
2079 rtx tem;
2081 /* Relational operations don't work here. We must know the mode
2082 of the operands in order to do the comparison correctly.
2083 Assuming a full word can give incorrect results.
2084 Consider comparing 128 with -128 in QImode. */
2085 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2086 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2088 /* Make sure the constant is second. */
2089 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2090 && swap_commutative_operands_p (op0, op1))
2091 std::swap (op0, op1);
2093 trueop0 = avoid_constant_pool_reference (op0);
2094 trueop1 = avoid_constant_pool_reference (op1);
2096 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2097 if (tem)
2098 return tem;
2099 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2101 if (tem)
2102 return tem;
2104 /* If the above steps did not result in a simplification and op0 or op1
2105 were constant pool references, use the referenced constants directly. */
2106 if (trueop0 != op0 || trueop1 != op1)
2107 return simplify_gen_binary (code, mode, trueop0, trueop1);
2109 return NULL_RTX;
2112 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2113 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2114 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2115 actual constants. */
2117 static rtx
2118 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2119 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2121 rtx tem, reversed, opleft, opright;
2122 HOST_WIDE_INT val;
2123 unsigned int width = GET_MODE_PRECISION (mode);
2125 /* Even if we can't compute a constant result,
2126 there are some cases worth simplifying. */
2128 switch (code)
2130 case PLUS:
2131 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2132 when x is NaN, infinite, or finite and nonzero. They aren't
2133 when x is -0 and the rounding mode is not towards -infinity,
2134 since (-0) + 0 is then 0. */
2135 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2136 return op0;
2138 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2139 transformations are safe even for IEEE. */
2140 if (GET_CODE (op0) == NEG)
2141 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2142 else if (GET_CODE (op1) == NEG)
2143 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2145 /* (~a) + 1 -> -a */
2146 if (INTEGRAL_MODE_P (mode)
2147 && GET_CODE (op0) == NOT
2148 && trueop1 == const1_rtx)
2149 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
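/* This is the two's-complement identity -a == ~a + 1; e.g. with
   a == 5, ~5 + 1 == -5. */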
2151 /* Handle both-operands-constant cases. We can only add
2152 CONST_INTs to constants since the sum of relocatable symbols
2153 can't be handled by most assemblers. Don't add CONST_INT
2154 to CONST_INT since overflow won't be computed properly if wider
2155 than HOST_BITS_PER_WIDE_INT. */
2157 if ((GET_CODE (op0) == CONST
2158 || GET_CODE (op0) == SYMBOL_REF
2159 || GET_CODE (op0) == LABEL_REF)
2160 && CONST_INT_P (op1))
2161 return plus_constant (mode, op0, INTVAL (op1));
2162 else if ((GET_CODE (op1) == CONST
2163 || GET_CODE (op1) == SYMBOL_REF
2164 || GET_CODE (op1) == LABEL_REF)
2165 && CONST_INT_P (op0))
2166 return plus_constant (mode, op1, INTVAL (op0));
2168 /* See if this is something like X * C - X or vice versa or
2169 if the multiplication is written as a shift. If so, we can
2170 distribute and make a new multiply, shift, or maybe just
2171 have X (if C is 2 in the example above). But don't make
2172 something more expensive than we had before. */
2174 if (SCALAR_INT_MODE_P (mode))
2176 rtx lhs = op0, rhs = op1;
2178 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2179 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2181 if (GET_CODE (lhs) == NEG)
2183 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2184 lhs = XEXP (lhs, 0);
2186 else if (GET_CODE (lhs) == MULT
2187 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2189 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2190 lhs = XEXP (lhs, 0);
2192 else if (GET_CODE (lhs) == ASHIFT
2193 && CONST_INT_P (XEXP (lhs, 1))
2194 && INTVAL (XEXP (lhs, 1)) >= 0
2195 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2197 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2198 GET_MODE_PRECISION (mode));
2199 lhs = XEXP (lhs, 0);
2202 if (GET_CODE (rhs) == NEG)
2204 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2205 rhs = XEXP (rhs, 0);
2207 else if (GET_CODE (rhs) == MULT
2208 && CONST_INT_P (XEXP (rhs, 1)))
2210 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2211 rhs = XEXP (rhs, 0);
2213 else if (GET_CODE (rhs) == ASHIFT
2214 && CONST_INT_P (XEXP (rhs, 1))
2215 && INTVAL (XEXP (rhs, 1)) >= 0
2216 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2218 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2219 GET_MODE_PRECISION (mode));
2220 rhs = XEXP (rhs, 0);
2223 if (rtx_equal_p (lhs, rhs))
2225 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2226 rtx coeff;
2227 bool speed = optimize_function_for_speed_p (cfun);
2229 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2231 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2232 return (set_src_cost (tem, mode, speed)
2233 <= set_src_cost (orig, mode, speed) ? tem : 0);
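/* For example, (plus (mult x (const_int 3)) x) can become
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   can become (mult x (const_int 5)), provided the new form is
   not more expensive than the original. */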
2237 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2238 if (CONST_SCALAR_INT_P (op1)
2239 && GET_CODE (op0) == XOR
2240 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2241 && mode_signbit_p (mode, op1))
2242 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2243 simplify_gen_binary (XOR, mode, op1,
2244 XEXP (op0, 1)));
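/* For example, in QImode, where the sign bit is 0x80,
   (plus (xor x (const_int 0x70)) (const_int 0x80)) becomes
   (xor x (const_int 0xf0)): adding the sign bit modulo 2^8 is
   the same as XORing it in, so the two constants combine. */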
2246 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2247 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2248 && GET_CODE (op0) == MULT
2249 && GET_CODE (XEXP (op0, 0)) == NEG)
2251 rtx in1, in2;
2253 in1 = XEXP (XEXP (op0, 0), 0);
2254 in2 = XEXP (op0, 1);
2255 return simplify_gen_binary (MINUS, mode, op1,
2256 simplify_gen_binary (MULT, mode,
2257 in1, in2));
2260 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2261 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2262 is 1. */
2263 if (COMPARISON_P (op0)
2264 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2265 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2266 && (reversed = reversed_comparison (op0, mode)))
2267 return
2268 simplify_gen_unary (NEG, mode, reversed, mode);
2270 /* If one of the operands is a PLUS or a MINUS, see if we can
2271 simplify this by the associative law.
2272 Don't use the associative law for floating point.
2273 The inaccuracy makes it nonassociative,
2274 and subtle programs can break if operations are associated. */
2276 if (INTEGRAL_MODE_P (mode)
2277 && (plus_minus_operand_p (op0)
2278 || plus_minus_operand_p (op1))
2279 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2280 return tem;
2282 /* Reassociate floating point addition only when the user
2283 specifies associative math operations. */
2284 if (FLOAT_MODE_P (mode)
2285 && flag_associative_math)
2287 tem = simplify_associative_operation (code, mode, op0, op1);
2288 if (tem)
2289 return tem;
2291 break;
2293 case COMPARE:
2294 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2295 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2296 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2297 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2299 rtx xop00 = XEXP (op0, 0);
2300 rtx xop10 = XEXP (op1, 0);
2302 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2303 return xop00;
2305 if (REG_P (xop00) && REG_P (xop10)
2306 && GET_MODE (xop00) == GET_MODE (xop10)
2307 && REGNO (xop00) == REGNO (xop10)
2308 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2309 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2310 return xop00;
2312 break;
2314 case MINUS:
2315 /* We can't assume x-x is 0 even with non-IEEE floating point,
2316 but since it is zero except in very strange circumstances, we
2317 will treat it as zero with -ffinite-math-only. */
2318 if (rtx_equal_p (trueop0, trueop1)
2319 && ! side_effects_p (op0)
2320 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2321 return CONST0_RTX (mode);
2323 /* Change subtraction from zero into negation. (0 - x) is the
2324 same as -x when x is NaN, infinite, or finite and nonzero.
2325 But if the mode has signed zeros, and does not round towards
2326 -infinity, then 0 - 0 is 0, not -0. */
2327 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2328 return simplify_gen_unary (NEG, mode, op1, mode);
2330 /* (-1 - a) is ~a, unless the expression contains symbolic
2331 constants, in which case not retaining additions and
2332 subtractions could cause invalid assembly to be produced. */
2333 if (trueop0 == constm1_rtx
2334 && !contains_symbolic_reference_p (op1))
2335 return simplify_gen_unary (NOT, mode, op1, mode);
2337 /* Subtracting 0 has no effect unless the mode has signed zeros
2338 and supports rounding towards -infinity. In such a case,
2339 0 - 0 is -0. */
2340 if (!(HONOR_SIGNED_ZEROS (mode)
2341 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2342 && trueop1 == CONST0_RTX (mode))
2343 return op0;
2345 /* See if this is something like X * C - X or vice versa or
2346 if the multiplication is written as a shift. If so, we can
2347 distribute and make a new multiply, shift, or maybe just
2348 have X (if C is 2 in the example above). But don't make
2349 something more expensive than we had before. */
2351 if (SCALAR_INT_MODE_P (mode))
2353 rtx lhs = op0, rhs = op1;
2355 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2356 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2358 if (GET_CODE (lhs) == NEG)
2360 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2361 lhs = XEXP (lhs, 0);
2363 else if (GET_CODE (lhs) == MULT
2364 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2366 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2367 lhs = XEXP (lhs, 0);
2369 else if (GET_CODE (lhs) == ASHIFT
2370 && CONST_INT_P (XEXP (lhs, 1))
2371 && INTVAL (XEXP (lhs, 1)) >= 0
2372 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2374 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2375 GET_MODE_PRECISION (mode));
2376 lhs = XEXP (lhs, 0);
2379 if (GET_CODE (rhs) == NEG)
2381 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2382 rhs = XEXP (rhs, 0);
2384 else if (GET_CODE (rhs) == MULT
2385 && CONST_INT_P (XEXP (rhs, 1)))
2387 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2388 rhs = XEXP (rhs, 0);
2390 else if (GET_CODE (rhs) == ASHIFT
2391 && CONST_INT_P (XEXP (rhs, 1))
2392 && INTVAL (XEXP (rhs, 1)) >= 0
2393 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2395 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2396 GET_MODE_PRECISION (mode));
2397 negcoeff1 = -negcoeff1;
2398 rhs = XEXP (rhs, 0);
2401 if (rtx_equal_p (lhs, rhs))
2403 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2404 rtx coeff;
2405 bool speed = optimize_function_for_speed_p (cfun);
2407 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2409 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2410 return (set_src_cost (tem, mode, speed)
2411 <= set_src_cost (orig, mode, speed) ? tem : 0);
2415 /* (a - (-b)) -> (a + b). True even for IEEE. */
2416 if (GET_CODE (op1) == NEG)
2417 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2419 /* (-x - c) may be simplified as (-c - x). */
2420 if (GET_CODE (op0) == NEG
2421 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2423 tem = simplify_unary_operation (NEG, mode, op1, mode);
2424 if (tem)
2425 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2428 /* Don't let a relocatable value get a negative coeff. */
2429 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2430 return simplify_gen_binary (PLUS, mode,
2431 op0,
2432 neg_const_int (mode, op1));
2434 /* (x - (x & y)) -> (x & ~y) */
2435 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2437 if (rtx_equal_p (op0, XEXP (op1, 0)))
2439 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2440 GET_MODE (XEXP (op1, 1)));
2441 return simplify_gen_binary (AND, mode, op0, tem);
2443 if (rtx_equal_p (op0, XEXP (op1, 1)))
2445 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2446 GET_MODE (XEXP (op1, 0)));
2447 return simplify_gen_binary (AND, mode, op0, tem);
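/* For example, (minus x (and x (const_int 7))) becomes
   (and x (const_int -8)), clearing the low three bits of x. */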
2451 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2452 by reversing the comparison code if valid. */
2453 if (STORE_FLAG_VALUE == 1
2454 && trueop0 == const1_rtx
2455 && COMPARISON_P (op1)
2456 && (reversed = reversed_comparison (op1, mode)))
2457 return reversed;
2459 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2460 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2461 && GET_CODE (op1) == MULT
2462 && GET_CODE (XEXP (op1, 0)) == NEG)
2464 rtx in1, in2;
2466 in1 = XEXP (XEXP (op1, 0), 0);
2467 in2 = XEXP (op1, 1);
2468 return simplify_gen_binary (PLUS, mode,
2469 simplify_gen_binary (MULT, mode,
2470 in1, in2),
2471 op0);
2474 /* Canonicalize (minus (neg A) (mult B C)) to
2475 (minus (mult (neg B) C) A). */
2476 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2477 && GET_CODE (op1) == MULT
2478 && GET_CODE (op0) == NEG)
2480 rtx in1, in2;
2482 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2483 in2 = XEXP (op1, 1);
2484 return simplify_gen_binary (MINUS, mode,
2485 simplify_gen_binary (MULT, mode,
2486 in1, in2),
2487 XEXP (op0, 0));
2490 /* If one of the operands is a PLUS or a MINUS, see if we can
2491 simplify this by the associative law. This will, for example,
2492 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2493 Don't use the associative law for floating point.
2494 The inaccuracy makes it nonassociative,
2495 and subtle programs can break if operations are associated. */
2497 if (INTEGRAL_MODE_P (mode)
2498 && (plus_minus_operand_p (op0)
2499 || plus_minus_operand_p (op1))
2500 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2501 return tem;
2502 break;
2504 case MULT:
2505 if (trueop1 == constm1_rtx)
2506 return simplify_gen_unary (NEG, mode, op0, mode);
2508 if (GET_CODE (op0) == NEG)
2510 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2511 /* If op1 is also a MULT and simplify_unary_operation
2512 just moved the NEG to the second operand, simplify_gen_binary
2513 below could, through simplify_associative_operation, move
2514 the NEG around again and recurse endlessly. */
2515 if (temp
2516 && GET_CODE (op1) == MULT
2517 && GET_CODE (temp) == MULT
2518 && XEXP (op1, 0) == XEXP (temp, 0)
2519 && GET_CODE (XEXP (temp, 1)) == NEG
2520 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2521 temp = NULL_RTX;
2522 if (temp)
2523 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2525 if (GET_CODE (op1) == NEG)
2527 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2528 /* If op0 is also a MULT and simplify_unary_operation
2529 just moved the NEG to the second operand, simplify_gen_binary
2530 below could, through simplify_associative_operation, move
2531 the NEG around again and recurse endlessly. */
2532 if (temp
2533 && GET_CODE (op0) == MULT
2534 && GET_CODE (temp) == MULT
2535 && XEXP (op0, 0) == XEXP (temp, 0)
2536 && GET_CODE (XEXP (temp, 1)) == NEG
2537 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2538 temp = NULL_RTX;
2539 if (temp)
2540 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2543 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2544 x is NaN, since x * 0 is then also NaN. Nor is it valid
2545 when the mode has signed zeros, since multiplying a negative
2546 number by 0 will give -0, not 0. */
2547 if (!HONOR_NANS (mode)
2548 && !HONOR_SIGNED_ZEROS (mode)
2549 && trueop1 == CONST0_RTX (mode)
2550 && ! side_effects_p (op0))
2551 return op1;
2553 /* In IEEE floating point, x*1 is not equivalent to x for
2554 signalling NaNs. */
2555 if (!HONOR_SNANS (mode)
2556 && trueop1 == CONST1_RTX (mode))
2557 return op0;
2559 /* Convert multiply by constant power of two into shift. */
2560 if (CONST_SCALAR_INT_P (trueop1))
2562 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2563 if (val >= 0)
2564 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
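/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)). */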
2567 /* x*2 is x+x and x*(-1) is -x */
2568 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2569 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2570 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2571 && GET_MODE (op0) == mode)
2573 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2575 if (real_equal (d1, &dconst2))
2576 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2578 if (!HONOR_SNANS (mode)
2579 && real_equal (d1, &dconstm1))
2580 return simplify_gen_unary (NEG, mode, op0, mode);
2583 /* Optimize -x * -x as x * x. */
2584 if (FLOAT_MODE_P (mode)
2585 && GET_CODE (op0) == NEG
2586 && GET_CODE (op1) == NEG
2587 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2588 && !side_effects_p (XEXP (op0, 0)))
2589 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2591 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2592 if (SCALAR_FLOAT_MODE_P (mode)
2593 && GET_CODE (op0) == ABS
2594 && GET_CODE (op1) == ABS
2595 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2596 && !side_effects_p (XEXP (op0, 0)))
2597 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2599 /* Reassociate multiplication, but for floating point MULTs
2600 only when the user specifies unsafe math optimizations. */
2601 if (! FLOAT_MODE_P (mode)
2602 || flag_unsafe_math_optimizations)
2604 tem = simplify_associative_operation (code, mode, op0, op1);
2605 if (tem)
2606 return tem;
2608 break;
2610 case IOR:
2611 if (trueop1 == CONST0_RTX (mode))
2612 return op0;
2613 if (INTEGRAL_MODE_P (mode)
2614 && trueop1 == CONSTM1_RTX (mode)
2615 && !side_effects_p (op0))
2616 return op1;
2617 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2618 return op0;
2619 /* A | (~A) -> -1 */
2620 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2621 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2622 && ! side_effects_p (op0)
2623 && SCALAR_INT_MODE_P (mode))
2624 return constm1_rtx;
2626 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2627 if (CONST_INT_P (op1)
2628 && HWI_COMPUTABLE_MODE_P (mode)
2629 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2630 && !side_effects_p (op0))
2631 return op1;
2633 /* Canonicalize (X & C1) | C2. */
2634 if (GET_CODE (op0) == AND
2635 && CONST_INT_P (trueop1)
2636 && CONST_INT_P (XEXP (op0, 1)))
2638 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2639 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2640 HOST_WIDE_INT c2 = INTVAL (trueop1);
2642 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2643 if ((c1 & c2) == c1
2644 && !side_effects_p (XEXP (op0, 0)))
2645 return trueop1;
2647 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2648 if (((c1|c2) & mask) == mask)
2649 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2651 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2652 if (((c1 & ~c2) & mask) != (c1 & mask))
2654 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2655 gen_int_mode (c1 & ~c2, mode));
2656 return simplify_gen_binary (IOR, mode, tem, op1);
2660 /* Convert (A & B) | A to A. */
2661 if (GET_CODE (op0) == AND
2662 && (rtx_equal_p (XEXP (op0, 0), op1)
2663 || rtx_equal_p (XEXP (op0, 1), op1))
2664 && ! side_effects_p (XEXP (op0, 0))
2665 && ! side_effects_p (XEXP (op0, 1)))
2666 return op1;
2668 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2669 mode size to (rotate A CX). */
2671 if (GET_CODE (op1) == ASHIFT
2672 || GET_CODE (op1) == SUBREG)
2674 opleft = op1;
2675 opright = op0;
2677 else
2679 opright = op1;
2680 opleft = op0;
2683 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2684 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2685 && CONST_INT_P (XEXP (opleft, 1))
2686 && CONST_INT_P (XEXP (opright, 1))
2687 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2688 == GET_MODE_PRECISION (mode)))
2689 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
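/* For example, in SImode (ior (ashift x (const_int 3))
   (lshiftrt x (const_int 29))) becomes (rotate x (const_int 3)),
   since 3 + 29 equals the 32-bit precision. */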
2691 /* Same, but for ashift that has been "simplified" to a wider mode
2692 by simplify_shift_const. */
2694 if (GET_CODE (opleft) == SUBREG
2695 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2696 && GET_CODE (opright) == LSHIFTRT
2697 && GET_CODE (XEXP (opright, 0)) == SUBREG
2698 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2699 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2700 && (GET_MODE_SIZE (GET_MODE (opleft))
2701 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2702 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2703 SUBREG_REG (XEXP (opright, 0)))
2704 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2705 && CONST_INT_P (XEXP (opright, 1))
2706 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2707 == GET_MODE_PRECISION (mode)))
2708 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2709 XEXP (SUBREG_REG (opleft), 1));
2711 /* If we have (ior (and X C1) C2), simplify this by making
2712 C1 as small as possible if C1 actually changes. */
2713 if (CONST_INT_P (op1)
2714 && (HWI_COMPUTABLE_MODE_P (mode)
2715 || INTVAL (op1) > 0)
2716 && GET_CODE (op0) == AND
2717 && CONST_INT_P (XEXP (op0, 1))
2718 && CONST_INT_P (op1)
2719 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2721 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2722 gen_int_mode (UINTVAL (XEXP (op0, 1))
2723 & ~UINTVAL (op1),
2724 mode));
2725 return simplify_gen_binary (IOR, mode, tmp, op1);
2728 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2729 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2730 the PLUS does not affect any of the bits in OP1, we can do
2731 the IOR as a PLUS and we can associate. This is valid if OP1
2732 can be safely shifted left C bits. */
2733 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2734 && GET_CODE (XEXP (op0, 0)) == PLUS
2735 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2736 && CONST_INT_P (XEXP (op0, 1))
2737 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2739 int count = INTVAL (XEXP (op0, 1));
2740 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2742 if (mask >> count == INTVAL (trueop1)
2743 && trunc_int_for_mode (mask, mode) == mask
2744 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2745 return simplify_gen_binary (ASHIFTRT, mode,
2746 plus_constant (mode, XEXP (op0, 0),
2747 mask),
2748 XEXP (op0, 1));
2751 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2752 if (tem)
2753 return tem;
2755 tem = simplify_associative_operation (code, mode, op0, op1);
2756 if (tem)
2757 return tem;
2758 break;
2760 case XOR:
2761 if (trueop1 == CONST0_RTX (mode))
2762 return op0;
2763 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2764 return simplify_gen_unary (NOT, mode, op0, mode);
2765 if (rtx_equal_p (trueop0, trueop1)
2766 && ! side_effects_p (op0)
2767 && GET_MODE_CLASS (mode) != MODE_CC)
2768 return CONST0_RTX (mode);
2770 /* Canonicalize XOR of the most significant bit to PLUS. */
2771 if (CONST_SCALAR_INT_P (op1)
2772 && mode_signbit_p (mode, op1))
2773 return simplify_gen_binary (PLUS, mode, op0, op1);
2774 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2775 if (CONST_SCALAR_INT_P (op1)
2776 && GET_CODE (op0) == PLUS
2777 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2778 && mode_signbit_p (mode, XEXP (op0, 1)))
2779 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2780 simplify_gen_binary (XOR, mode, op1,
2781 XEXP (op0, 1)));
2783 /* If we are XORing two things that have no bits in common,
2784 convert them into an IOR. This helps to detect rotation encoded
2785 using those methods and possibly other simplifications. */
2787 if (HWI_COMPUTABLE_MODE_P (mode)
2788 && (nonzero_bits (op0, mode)
2789 & nonzero_bits (op1, mode)) == 0)
2790 return (simplify_gen_binary (IOR, mode, op0, op1));
2792 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2793 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2794 (NOT y). */
2796 int num_negated = 0;
2798 if (GET_CODE (op0) == NOT)
2799 num_negated++, op0 = XEXP (op0, 0);
2800 if (GET_CODE (op1) == NOT)
2801 num_negated++, op1 = XEXP (op1, 0);
2803 if (num_negated == 2)
2804 return simplify_gen_binary (XOR, mode, op0, op1);
2805 else if (num_negated == 1)
2806 return simplify_gen_unary (NOT, mode,
2807 simplify_gen_binary (XOR, mode, op0, op1),
2808 mode);
2811 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2812 correspond to a machine insn or result in further simplifications
2813 if B is a constant. */
2815 if (GET_CODE (op0) == AND
2816 && rtx_equal_p (XEXP (op0, 1), op1)
2817 && ! side_effects_p (op1))
2818 return simplify_gen_binary (AND, mode,
2819 simplify_gen_unary (NOT, mode,
2820 XEXP (op0, 0), mode),
2821 op1);
2823 else if (GET_CODE (op0) == AND
2824 && rtx_equal_p (XEXP (op0, 0), op1)
2825 && ! side_effects_p (op1))
2826 return simplify_gen_binary (AND, mode,
2827 simplify_gen_unary (NOT, mode,
2828 XEXP (op0, 1), mode),
2829 op1);
2831 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2832 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2833 out bits inverted twice and not set by C. Similarly, given
2834 (xor (and (xor A B) C) D), simplify without inverting C in
2835 the xor operand: (xor (and A C) (B&C)^D).
2837 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2838 && GET_CODE (XEXP (op0, 0)) == XOR
2839 && CONST_INT_P (op1)
2840 && CONST_INT_P (XEXP (op0, 1))
2841 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2843 enum rtx_code op = GET_CODE (op0);
2844 rtx a = XEXP (XEXP (op0, 0), 0);
2845 rtx b = XEXP (XEXP (op0, 0), 1);
2846 rtx c = XEXP (op0, 1);
2847 rtx d = op1;
2848 HOST_WIDE_INT bval = INTVAL (b);
2849 HOST_WIDE_INT cval = INTVAL (c);
2850 HOST_WIDE_INT dval = INTVAL (d);
2851 HOST_WIDE_INT xcval;
2853 if (op == IOR)
2854 xcval = ~cval;
2855 else
2856 xcval = cval;
2858 return simplify_gen_binary (XOR, mode,
2859 simplify_gen_binary (op, mode, a, c),
2860 gen_int_mode ((bval & xcval) ^ dval,
2861 mode));
2864 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2865 we can transform like this:
2866 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2867 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2868 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2869 Attempt a few simplifications when B and C are both constants. */
2870 if (GET_CODE (op0) == AND
2871 && CONST_INT_P (op1)
2872 && CONST_INT_P (XEXP (op0, 1)))
2874 rtx a = XEXP (op0, 0);
2875 rtx b = XEXP (op0, 1);
2876 rtx c = op1;
2877 HOST_WIDE_INT bval = INTVAL (b);
2878 HOST_WIDE_INT cval = INTVAL (c);
2880 /* Instead of computing ~A&C, we compute its negated value,
2881 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2882 optimize for sure. If it does not simplify, we still try
2883 to compute ~A&C below, but since that always allocates
2884 RTL, we don't try that before committing to returning a
2885 simplified expression. */
2886 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2887 GEN_INT (~cval));
2889 if ((~cval & bval) == 0)
2891 rtx na_c = NULL_RTX;
2892 if (n_na_c)
2893 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2894 else
2896 /* If ~A does not simplify, don't bother: we don't
2897 want to simplify 2 operations into 3, and if na_c
2898 were to simplify with na, n_na_c would have
2899 simplified as well. */
2900 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2901 if (na)
2902 na_c = simplify_gen_binary (AND, mode, na, c);
2905 /* Try to simplify ~A&C | ~B&C. */
2906 if (na_c != NULL_RTX)
2907 return simplify_gen_binary (IOR, mode, na_c,
2908 gen_int_mode (~bval & cval, mode));
2910 else
2912 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2913 if (n_na_c == CONSTM1_RTX (mode))
2915 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2916 gen_int_mode (~cval & bval,
2917 mode));
2918 return simplify_gen_binary (IOR, mode, a_nc_b,
2919 gen_int_mode (~bval & cval,
2920 mode));
2925 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2926 do (ior (and A ~C) (and B C)), which is a single machine instruction on
2927 some machines and also has a shorter instruction path length. */
2928 if (GET_CODE (op0) == AND
2929 && GET_CODE (XEXP (op0, 0)) == XOR
2930 && CONST_INT_P (XEXP (op0, 1))
2931 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2933 rtx a = trueop1;
2934 rtx b = XEXP (XEXP (op0, 0), 1);
2935 rtx c = XEXP (op0, 1);
2936 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2937 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2938 rtx bc = simplify_gen_binary (AND, mode, b, c);
2939 return simplify_gen_binary (IOR, mode, a_nc, bc);
2941 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2942 else if (GET_CODE (op0) == AND
2943 && GET_CODE (XEXP (op0, 0)) == XOR
2944 && CONST_INT_P (XEXP (op0, 1))
2945 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2947 rtx a = XEXP (XEXP (op0, 0), 0);
2948 rtx b = trueop1;
2949 rtx c = XEXP (op0, 1);
2950 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2951 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2952 rtx ac = simplify_gen_binary (AND, mode, a, c);
2953 return simplify_gen_binary (IOR, mode, ac, b_nc);
2956 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2957 comparison if STORE_FLAG_VALUE is 1. */
2958 if (STORE_FLAG_VALUE == 1
2959 && trueop1 == const1_rtx
2960 && COMPARISON_P (op0)
2961 && (reversed = reversed_comparison (op0, mode)))
2962 return reversed;
2964 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2965 is (lt foo (const_int 0)), so we can perform the above
2966 simplification if STORE_FLAG_VALUE is 1. */
2968 if (STORE_FLAG_VALUE == 1
2969 && trueop1 == const1_rtx
2970 && GET_CODE (op0) == LSHIFTRT
2971 && CONST_INT_P (XEXP (op0, 1))
2972 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2973 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2975 /* (xor (comparison foo bar) (const_int sign-bit))
2976 when STORE_FLAG_VALUE is the sign bit. */
2977 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2978 && trueop1 == const_true_rtx
2979 && COMPARISON_P (op0)
2980 && (reversed = reversed_comparison (op0, mode)))
2981 return reversed;
2983 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2984 if (tem)
2985 return tem;
2987 tem = simplify_associative_operation (code, mode, op0, op1);
2988 if (tem)
2989 return tem;
2990 break;
2992 case AND:
2993 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2994 return trueop1;
2995 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2996 return op0;
2997 if (HWI_COMPUTABLE_MODE_P (mode))
2999 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3000 HOST_WIDE_INT nzop1;
3001 if (CONST_INT_P (trueop1))
3003 HOST_WIDE_INT val1 = INTVAL (trueop1);
3004 /* If we are turning off bits already known off in OP0, we need
3005 not do an AND. */
3006 if ((nzop0 & ~val1) == 0)
3007 return op0;
3009 nzop1 = nonzero_bits (trueop1, mode);
3010 /* If we are clearing all the nonzero bits, the result is zero. */
3011 if ((nzop1 & nzop0) == 0
3012 && !side_effects_p (op0) && !side_effects_p (op1))
3013 return CONST0_RTX (mode);
3015 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3016 && GET_MODE_CLASS (mode) != MODE_CC)
3017 return op0;
3018 /* A & (~A) -> 0 */
3019 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3020 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3021 && ! side_effects_p (op0)
3022 && GET_MODE_CLASS (mode) != MODE_CC)
3023 return CONST0_RTX (mode);
3025 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3026 there are no nonzero bits of C outside of X's mode. */
3027 if ((GET_CODE (op0) == SIGN_EXTEND
3028 || GET_CODE (op0) == ZERO_EXTEND)
3029 && CONST_INT_P (trueop1)
3030 && HWI_COMPUTABLE_MODE_P (mode)
3031 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3032 & UINTVAL (trueop1)) == 0)
3034 machine_mode imode = GET_MODE (XEXP (op0, 0));
3035 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3036 gen_int_mode (INTVAL (trueop1),
3037 imode));
3038 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3041 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3042 we might be able to further simplify the AND with X and potentially
3043 remove the truncation altogether. */
3044 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3046 rtx x = XEXP (op0, 0);
3047 machine_mode xmode = GET_MODE (x);
3048 tem = simplify_gen_binary (AND, xmode, x,
3049 gen_int_mode (INTVAL (trueop1), xmode));
3050 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3053 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3054 if (GET_CODE (op0) == IOR
3055 && CONST_INT_P (trueop1)
3056 && CONST_INT_P (XEXP (op0, 1)))
3058 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3059 return simplify_gen_binary (IOR, mode,
3060 simplify_gen_binary (AND, mode,
3061 XEXP (op0, 0), op1),
3062 gen_int_mode (tmp, mode));
3065 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3066 insn (and may simplify more). */
3067 if (GET_CODE (op0) == XOR
3068 && rtx_equal_p (XEXP (op0, 0), op1)
3069 && ! side_effects_p (op1))
3070 return simplify_gen_binary (AND, mode,
3071 simplify_gen_unary (NOT, mode,
3072 XEXP (op0, 1), mode),
3073 op1);
3075 if (GET_CODE (op0) == XOR
3076 && rtx_equal_p (XEXP (op0, 1), op1)
3077 && ! side_effects_p (op1))
3078 return simplify_gen_binary (AND, mode,
3079 simplify_gen_unary (NOT, mode,
3080 XEXP (op0, 0), mode),
3081 op1);
3083 /* Similarly for (~(A ^ B)) & A. */
3084 if (GET_CODE (op0) == NOT
3085 && GET_CODE (XEXP (op0, 0)) == XOR
3086 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3087 && ! side_effects_p (op1))
3088 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3090 if (GET_CODE (op0) == NOT
3091 && GET_CODE (XEXP (op0, 0)) == XOR
3092 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3093 && ! side_effects_p (op1))
3094 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3096 /* Convert (A | B) & A to A. */
3097 if (GET_CODE (op0) == IOR
3098 && (rtx_equal_p (XEXP (op0, 0), op1)
3099 || rtx_equal_p (XEXP (op0, 1), op1))
3100 && ! side_effects_p (XEXP (op0, 0))
3101 && ! side_effects_p (XEXP (op0, 1)))
3102 return op1;
3104 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3105 ((A & N) + B) & M -> (A + B) & M
3106 Similarly if (N & M) == 0,
3107 ((A | N) + B) & M -> (A + B) & M
3108 and for - instead of + and/or ^ instead of |.
3109 Also, if (N & M) == 0, then
3110 (A +- N) & M -> A & M. */
3111 if (CONST_INT_P (trueop1)
3112 && HWI_COMPUTABLE_MODE_P (mode)
3113 && ~UINTVAL (trueop1)
3114 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3115 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3117 rtx pmop[2];
3118 int which;
3120 pmop[0] = XEXP (op0, 0);
3121 pmop[1] = XEXP (op0, 1);
3123 if (CONST_INT_P (pmop[1])
3124 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3125 return simplify_gen_binary (AND, mode, pmop[0], op1);
3127 for (which = 0; which < 2; which++)
3129 tem = pmop[which];
3130 switch (GET_CODE (tem))
3132 case AND:
3133 if (CONST_INT_P (XEXP (tem, 1))
3134 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3135 == UINTVAL (trueop1))
3136 pmop[which] = XEXP (tem, 0);
3137 break;
3138 case IOR:
3139 case XOR:
3140 if (CONST_INT_P (XEXP (tem, 1))
3141 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3142 pmop[which] = XEXP (tem, 0);
3143 break;
3144 default:
3145 break;
3149 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3151 tem = simplify_gen_binary (GET_CODE (op0), mode,
3152 pmop[0], pmop[1]);
3153 return simplify_gen_binary (code, mode, tem, op1);
3157 /* (and X (ior (not X) Y)) -> (and X Y) */
3158 if (GET_CODE (op1) == IOR
3159 && GET_CODE (XEXP (op1, 0)) == NOT
3160 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3161 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3163 /* (and (ior (not X) Y) X) -> (and X Y) */
3164 if (GET_CODE (op0) == IOR
3165 && GET_CODE (XEXP (op0, 0)) == NOT
3166 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3167 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3169 /* (and X (ior Y (not X))) -> (and X Y) */
3170 if (GET_CODE (op1) == IOR
3171 && GET_CODE (XEXP (op1, 1)) == NOT
3172 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3173 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3175 /* (and (ior Y (not X)) X) -> (and X Y) */
3176 if (GET_CODE (op0) == IOR
3177 && GET_CODE (XEXP (op0, 1)) == NOT
3178 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3179 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3181 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3182 if (tem)
3183 return tem;
3185 tem = simplify_associative_operation (code, mode, op0, op1);
3186 if (tem)
3187 return tem;
3188 break;
3190 case UDIV:
3191 /* 0/x is 0 (or x&0 if x has side-effects). */
3192 if (trueop0 == CONST0_RTX (mode))
3194 if (side_effects_p (op1))
3195 return simplify_gen_binary (AND, mode, op1, trueop0);
3196 return trueop0;
3198 /* x/1 is x. */
3199 if (trueop1 == CONST1_RTX (mode))
3201 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3202 if (tem)
3203 return tem;
3205 /* Convert divide by power of two into shift. */
3206 if (CONST_INT_P (trueop1)
3207 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3208 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
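/* For example, (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)). */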
3209 break;
3211 case DIV:
3212 /* Handle floating point and integers separately. */
3213 if (SCALAR_FLOAT_MODE_P (mode))
3215 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3216 safe for modes with NaNs, since 0.0 / 0.0 will then be
3217 NaN rather than 0.0. Nor is it safe for modes with signed
3218 zeros, since dividing 0 by a negative number gives -0.0. */
3219 if (trueop0 == CONST0_RTX (mode)
3220 && !HONOR_NANS (mode)
3221 && !HONOR_SIGNED_ZEROS (mode)
3222 && ! side_effects_p (op1))
3223 return op0;
3224 /* x/1.0 is x. */
3225 if (trueop1 == CONST1_RTX (mode)
3226 && !HONOR_SNANS (mode))
3227 return op0;
3229 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3230 && trueop1 != CONST0_RTX (mode))
3232 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3234 /* x/-1.0 is -x. */
3235 if (real_equal (d1, &dconstm1)
3236 && !HONOR_SNANS (mode))
3237 return simplify_gen_unary (NEG, mode, op0, mode);
3239 /* Change FP division by a constant into multiplication.
3240 Only do this with -freciprocal-math. */
3241 if (flag_reciprocal_math
3242 && !real_equal (d1, &dconst0))
3244 REAL_VALUE_TYPE d;
3245 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3246 tem = const_double_from_real_value (d, mode);
3247 return simplify_gen_binary (MULT, mode, op0, tem);
3251 else if (SCALAR_INT_MODE_P (mode))
3253 /* 0/x is 0 (or x&0 if x has side-effects). */
3254 if (trueop0 == CONST0_RTX (mode)
3255 && !cfun->can_throw_non_call_exceptions)
3257 if (side_effects_p (op1))
3258 return simplify_gen_binary (AND, mode, op1, trueop0);
3259 return trueop0;
3261 /* x/1 is x. */
3262 if (trueop1 == CONST1_RTX (mode))
3264 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3265 if (tem)
3266 return tem;
3268 /* x/-1 is -x. */
3269 if (trueop1 == constm1_rtx)
3271 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3272 if (x)
3273 return simplify_gen_unary (NEG, mode, x, mode);
3276 break;
3278 case UMOD:
3279 /* 0%x is 0 (or x&0 if x has side-effects). */
3280 if (trueop0 == CONST0_RTX (mode))
3282 if (side_effects_p (op1))
3283 return simplify_gen_binary (AND, mode, op1, trueop0);
3284 return trueop0;
3286 /* x%1 is 0 (or x&0 if x has side-effects). */
3287 if (trueop1 == CONST1_RTX (mode))
3289 if (side_effects_p (op0))
3290 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3291 return CONST0_RTX (mode);
3293 /* Implement modulus by power of two as AND. */
3294 if (CONST_INT_P (trueop1)
3295 && exact_log2 (UINTVAL (trueop1)) > 0)
3296 return simplify_gen_binary (AND, mode, op0,
3297 gen_int_mode (INTVAL (op1) - 1, mode));
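/* For example, (umod x (const_int 8)) becomes (and x (const_int 7)). */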
3298 break;
3300 case MOD:
3301 /* 0%x is 0 (or x&0 if x has side-effects). */
3302 if (trueop0 == CONST0_RTX (mode))
3304 if (side_effects_p (op1))
3305 return simplify_gen_binary (AND, mode, op1, trueop0);
3306 return trueop0;
3308 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3309 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3311 if (side_effects_p (op0))
3312 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3313 return CONST0_RTX (mode);
3315 break;
3317 case ROTATERT:
3318 case ROTATE:
3319 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3320 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3321 bitsize - 1, use the other direction of rotate with an amount of
3322 1 .. bitsize / 2 - 1 instead. */
3323 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3324 if (CONST_INT_P (trueop1)
3325 && IN_RANGE (INTVAL (trueop1),
3326 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3327 GET_MODE_PRECISION (mode) - 1))
3328 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3329 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3330 - INTVAL (trueop1)));
3331 #endif
3332 /* FALLTHRU */
3333 case ASHIFTRT:
3334 if (trueop1 == CONST0_RTX (mode))
3335 return op0;
3336 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3337 return op0;
3338 /* Rotating ~0 always results in ~0. */
3339 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3340 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3341 && ! side_effects_p (op1))
3342 return op0;
3343 /* Given:
3344 scalar modes M1, M2
3345 scalar constants c1, c2
3346 size (M2) > size (M1)
3347 c1 == size (M2) - size (M1)
3348 optimize:
3349 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3350 <low_part>)
3351 (const_int <c2>))
3353 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3354 <low_part>). */
3355 if (code == ASHIFTRT
3356 && !VECTOR_MODE_P (mode)
3357 && SUBREG_P (op0)
3358 && CONST_INT_P (op1)
3359 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3360 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3361 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3362 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3363 > GET_MODE_BITSIZE (mode))
3364 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3365 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3366 - GET_MODE_BITSIZE (mode)))
3367 && subreg_lowpart_p (op0))
3369 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3370 + INTVAL (op1));
3371 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3372 tmp = simplify_gen_binary (ASHIFTRT,
3373 GET_MODE (SUBREG_REG (op0)),
3374 XEXP (SUBREG_REG (op0), 0),
3375 tmp);
3376 return lowpart_subreg (mode, tmp, inner_mode);
3378 canonicalize_shift:
3379 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3381 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3382 if (val != INTVAL (op1))
3383 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3385 break;
3387 case ASHIFT:
3388 case SS_ASHIFT:
3389 case US_ASHIFT:
3390 if (trueop1 == CONST0_RTX (mode))
3391 return op0;
3392 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3393 return op0;
3394 goto canonicalize_shift;
3396 case LSHIFTRT:
3397 if (trueop1 == CONST0_RTX (mode))
3398 return op0;
3399 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3400 return op0;
3401 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3402 if (GET_CODE (op0) == CLZ
3403 && CONST_INT_P (trueop1)
3404 && STORE_FLAG_VALUE == 1
3405 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3407 machine_mode imode = GET_MODE (XEXP (op0, 0));
3408 unsigned HOST_WIDE_INT zero_val = 0;
3410 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3411 && zero_val == GET_MODE_PRECISION (imode)
3412 && INTVAL (trueop1) == exact_log2 (zero_val))
3413 return simplify_gen_relational (EQ, mode, imode,
3414 XEXP (op0, 0), const0_rtx);
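/* For example, with 32-bit operands on a target where
   CLZ_DEFINED_VALUE_AT_ZERO yields 32, (lshiftrt (clz x) (const_int 5))
   becomes (eq x (const_int 0)): bit 5 of the CLZ result is set only
   when x is zero. */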
3416 goto canonicalize_shift;
3418 case SMIN:
3419 if (width <= HOST_BITS_PER_WIDE_INT
3420 && mode_signbit_p (mode, trueop1)
3421 && ! side_effects_p (op0))
3422 return op1;
3423 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3424 return op0;
3425 tem = simplify_associative_operation (code, mode, op0, op1);
3426 if (tem)
3427 return tem;
3428 break;
3430 case SMAX:
3431 if (width <= HOST_BITS_PER_WIDE_INT
3432 && CONST_INT_P (trueop1)
3433 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3434 && ! side_effects_p (op0))
3435 return op1;
3436 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3437 return op0;
3438 tem = simplify_associative_operation (code, mode, op0, op1);
3439 if (tem)
3440 return tem;
3441 break;
3443 case UMIN:
3444 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3445 return op1;
3446 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3447 return op0;
3448 tem = simplify_associative_operation (code, mode, op0, op1);
3449 if (tem)
3450 return tem;
3451 break;
3453 case UMAX:
3454 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3455 return op1;
3456 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3457 return op0;
3458 tem = simplify_associative_operation (code, mode, op0, op1);
3459 if (tem)
3460 return tem;
3461 break;
3463 case SS_PLUS:
3464 case US_PLUS:
3465 case SS_MINUS:
3466 case US_MINUS:
3467 case SS_MULT:
3468 case US_MULT:
3469 case SS_DIV:
3470 case US_DIV:
3471 /* ??? There are simplifications that can be done. */
3472 return 0;
3474 case VEC_SELECT:
3475 if (!VECTOR_MODE_P (mode))
3477 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3478 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3479 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3480 gcc_assert (XVECLEN (trueop1, 0) == 1);
3481 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3483 if (GET_CODE (trueop0) == CONST_VECTOR)
3484 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3485 (trueop1, 0, 0)));
3487 /* Extract a scalar element from a nested VEC_SELECT expression
3488 (with an optional nested VEC_CONCAT expression). Some targets
3489 (i386) extract a scalar element from a vector using a chain of
3490 nested VEC_SELECT expressions. When the input operand is a memory
3491 operand, this operation can be simplified to a simple scalar
3492 load from an offset memory address. */
3493 if (GET_CODE (trueop0) == VEC_SELECT)
3495 rtx op0 = XEXP (trueop0, 0);
3496 rtx op1 = XEXP (trueop0, 1);
3498 machine_mode opmode = GET_MODE (op0);
3499 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3500 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3502 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3503 int elem;
3505 rtvec vec;
3506 rtx tmp_op, tmp;
3508 gcc_assert (GET_CODE (op1) == PARALLEL);
3509 gcc_assert (i < n_elts);
3511 /* Select the element pointed to by the nested selector. */
3512 elem = INTVAL (XVECEXP (op1, 0, i));
3514 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3515 if (GET_CODE (op0) == VEC_CONCAT)
3517 rtx op00 = XEXP (op0, 0);
3518 rtx op01 = XEXP (op0, 1);
3520 machine_mode mode00, mode01;
3521 int n_elts00, n_elts01;
3523 mode00 = GET_MODE (op00);
3524 mode01 = GET_MODE (op01);
3526 /* Find out number of elements of each operand. */
3527 if (VECTOR_MODE_P (mode00))
3529 elt_size = GET_MODE_UNIT_SIZE (mode00);
3530 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3532 else
3533 n_elts00 = 1;
3535 if (VECTOR_MODE_P (mode01))
3537 elt_size = GET_MODE_UNIT_SIZE (mode01);
3538 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3540 else
3541 n_elts01 = 1;
3543 gcc_assert (n_elts == n_elts00 + n_elts01);
3545 /* Select correct operand of VEC_CONCAT
3546 and adjust selector. */
3547 if (elem < n_elts01)
3548 tmp_op = op00;
3549 else
3551 tmp_op = op01;
3552 elem -= n_elts00;
3555 else
3556 tmp_op = op0;
3558 vec = rtvec_alloc (1);
3559 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3561 tmp = gen_rtx_fmt_ee (code, mode,
3562 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3563 return tmp;
3565 if (GET_CODE (trueop0) == VEC_DUPLICATE
3566 && GET_MODE (XEXP (trueop0, 0)) == mode)
3567 return XEXP (trueop0, 0);
3569 else
3571 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3572 gcc_assert (GET_MODE_INNER (mode)
3573 == GET_MODE_INNER (GET_MODE (trueop0)));
3574 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3576 if (GET_CODE (trueop0) == CONST_VECTOR)
3578 int elt_size = GET_MODE_UNIT_SIZE (mode);
3579 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3580 rtvec v = rtvec_alloc (n_elts);
3581 unsigned int i;
3583 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3584 for (i = 0; i < n_elts; i++)
3586 rtx x = XVECEXP (trueop1, 0, i);
3588 gcc_assert (CONST_INT_P (x));
3589 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3590 INTVAL (x));
3593 return gen_rtx_CONST_VECTOR (mode, v);
3596 /* Recognize the identity. */
3597 if (GET_MODE (trueop0) == mode)
3599 bool maybe_ident = true;
3600 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3602 rtx j = XVECEXP (trueop1, 0, i);
3603 if (!CONST_INT_P (j) || INTVAL (j) != i)
3605 maybe_ident = false;
3606 break;
3609 if (maybe_ident)
3610 return trueop0;
3613 /* If we build {a,b} then permute it, build the result directly. */
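/* For example, with trueop0 = (vec_concat:V4DF (vec_concat:V2DF a b)
   (vec_concat:V2DF c d)) and selector (parallel [3 0]), the result
   is built directly as (vec_concat:V2DF d a). */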
3614 if (XVECLEN (trueop1, 0) == 2
3615 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3616 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3617 && GET_CODE (trueop0) == VEC_CONCAT
3618 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3619 && GET_MODE (XEXP (trueop0, 0)) == mode
3620 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3621 && GET_MODE (XEXP (trueop0, 1)) == mode)
3623 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3624 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3625 rtx subop0, subop1;
3627 gcc_assert (i0 < 4 && i1 < 4);
3628 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3629 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3631 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3634 if (XVECLEN (trueop1, 0) == 2
3635 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3636 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3637 && GET_CODE (trueop0) == VEC_CONCAT
3638 && GET_MODE (trueop0) == mode)
3640 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3641 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3642 rtx subop0, subop1;
3644 gcc_assert (i0 < 2 && i1 < 2);
3645 subop0 = XEXP (trueop0, i0);
3646 subop1 = XEXP (trueop0, i1);
3648 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3651 /* If we select one half of a vec_concat, return that. */
3652 if (GET_CODE (trueop0) == VEC_CONCAT
3653 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3655 rtx subop0 = XEXP (trueop0, 0);
3656 rtx subop1 = XEXP (trueop0, 1);
3657 machine_mode mode0 = GET_MODE (subop0);
3658 machine_mode mode1 = GET_MODE (subop1);
3659 int li = GET_MODE_UNIT_SIZE (mode0);
3660 int l0 = GET_MODE_SIZE (mode0) / li;
3661 int l1 = GET_MODE_SIZE (mode1) / li;
3662 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3663 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3665 bool success = true;
3666 for (int i = 1; i < l0; ++i)
3668 rtx j = XVECEXP (trueop1, 0, i);
3669 if (!CONST_INT_P (j) || INTVAL (j) != i)
3671 success = false;
3672 break;
3675 if (success)
3676 return subop0;
3678 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3680 bool success = true;
3681 for (int i = 1; i < l1; ++i)
3683 rtx j = XVECEXP (trueop1, 0, i);
3684 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3686 success = false;
3687 break;
3690 if (success)
3691 return subop1;
3696 if (XVECLEN (trueop1, 0) == 1
3697 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3698 && GET_CODE (trueop0) == VEC_CONCAT)
3700 rtx vec = trueop0;
3701 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3703 /* Try to find the element in the VEC_CONCAT. */
3704 while (GET_MODE (vec) != mode
3705 && GET_CODE (vec) == VEC_CONCAT)
3707 HOST_WIDE_INT vec_size;
3709 if (CONST_INT_P (XEXP (vec, 0)))
3711 /* vec_concat of two const_ints doesn't make sense with
3712 respect to modes. */
3713 if (CONST_INT_P (XEXP (vec, 1)))
3714 return 0;
3716 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3717 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3719 else
3720 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3722 if (offset < vec_size)
3723 vec = XEXP (vec, 0);
3724 else
3726 offset -= vec_size;
3727 vec = XEXP (vec, 1);
3729 vec = avoid_constant_pool_reference (vec);
3732 if (GET_MODE (vec) == mode)
3733 return vec;
3736 /* If we select elements in a vec_merge that all come from the same
3737 operand, select from that operand directly. */
3738 if (GET_CODE (op0) == VEC_MERGE)
3740 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3741 if (CONST_INT_P (trueop02))
3743 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3744 bool all_operand0 = true;
3745 bool all_operand1 = true;
3746 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3748 rtx j = XVECEXP (trueop1, 0, i);
3749 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3750 all_operand1 = false;
3751 else
3752 all_operand0 = false;
3754 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3755 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3756 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3757 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3761 /* If we have two nested selects that are inverses of each
3762 other, replace them with the source operand. */
3763 if (GET_CODE (trueop0) == VEC_SELECT
3764 && GET_MODE (XEXP (trueop0, 0)) == mode)
3766 rtx op0_subop1 = XEXP (trueop0, 1);
3767 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3768 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3770 /* Apply the outer ordering vector to the inner one. (The inner
3771 ordering vector is expressly permitted to be of a different
3772 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3773 then the two VEC_SELECTs cancel. */
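/* For example, a swap applied twice,
   (vec_select:V2DF (vec_select:V2DF X (parallel [1 0])) (parallel [1 0])),
   composes to { 0, 1 } and therefore reduces to X. */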
3774 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3776 rtx x = XVECEXP (trueop1, 0, i);
3777 if (!CONST_INT_P (x))
3778 return 0;
3779 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3780 if (!CONST_INT_P (y) || i != INTVAL (y))
3781 return 0;
3783 return XEXP (trueop0, 0);
3786 return 0;
3787 case VEC_CONCAT:
3789 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3790 ? GET_MODE (trueop0)
3791 : GET_MODE_INNER (mode));
3792 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3793 ? GET_MODE (trueop1)
3794 : GET_MODE_INNER (mode));
3796 gcc_assert (VECTOR_MODE_P (mode));
3797 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3798 == GET_MODE_SIZE (mode));
3800 if (VECTOR_MODE_P (op0_mode))
3801 gcc_assert (GET_MODE_INNER (mode)
3802 == GET_MODE_INNER (op0_mode));
3803 else
3804 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3806 if (VECTOR_MODE_P (op1_mode))
3807 gcc_assert (GET_MODE_INNER (mode)
3808 == GET_MODE_INNER (op1_mode));
3809 else
3810 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3812 if ((GET_CODE (trueop0) == CONST_VECTOR
3813 || CONST_SCALAR_INT_P (trueop0)
3814 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3815 && (GET_CODE (trueop1) == CONST_VECTOR
3816 || CONST_SCALAR_INT_P (trueop1)
3817 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3819 int elt_size = GET_MODE_UNIT_SIZE (mode);
3820 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3821 rtvec v = rtvec_alloc (n_elts);
3822 unsigned int i;
3823 unsigned in_n_elts = 1;
3825 if (VECTOR_MODE_P (op0_mode))
3826 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3827 for (i = 0; i < n_elts; i++)
3829 if (i < in_n_elts)
3831 if (!VECTOR_MODE_P (op0_mode))
3832 RTVEC_ELT (v, i) = trueop0;
3833 else
3834 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3836 else
3838 if (!VECTOR_MODE_P (op1_mode))
3839 RTVEC_ELT (v, i) = trueop1;
3840 else
3841 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3842 i - in_n_elts);
3846 return gen_rtx_CONST_VECTOR (mode, v);
3849 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3850 Restrict the transformation to avoid generating a VEC_SELECT with a
3851 mode unrelated to its operand. */
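/* For example, (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
   (vec_select:V2SI X (parallel [2 3]))), with X of mode V4SI, becomes
   (vec_select:V4SI X (parallel [0 1 2 3])). */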
3852 if (GET_CODE (trueop0) == VEC_SELECT
3853 && GET_CODE (trueop1) == VEC_SELECT
3854 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3855 && GET_MODE (XEXP (trueop0, 0)) == mode)
3857 rtx par0 = XEXP (trueop0, 1);
3858 rtx par1 = XEXP (trueop1, 1);
3859 int len0 = XVECLEN (par0, 0);
3860 int len1 = XVECLEN (par1, 0);
3861 rtvec vec = rtvec_alloc (len0 + len1);
3862 for (int i = 0; i < len0; i++)
3863 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3864 for (int i = 0; i < len1; i++)
3865 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3866 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3867 gen_rtx_PARALLEL (VOIDmode, vec));
3870 return 0;
3872 default:
3873 gcc_unreachable ();
3876 return 0;
3880 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3881 rtx op0, rtx op1)
3883 unsigned int width = GET_MODE_PRECISION (mode);
3885 if (VECTOR_MODE_P (mode)
3886 && code != VEC_CONCAT
3887 && GET_CODE (op0) == CONST_VECTOR
3888 && GET_CODE (op1) == CONST_VECTOR)
3890 unsigned n_elts = GET_MODE_NUNITS (mode);
3891 machine_mode op0mode = GET_MODE (op0);
3892 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3893 machine_mode op1mode = GET_MODE (op1);
3894 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3895 rtvec v = rtvec_alloc (n_elts);
3896 unsigned int i;
3898 gcc_assert (op0_n_elts == n_elts);
3899 gcc_assert (op1_n_elts == n_elts);
3900 for (i = 0; i < n_elts; i++)
3902 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3903 CONST_VECTOR_ELT (op0, i),
3904 CONST_VECTOR_ELT (op1, i));
3905 if (!x)
3906 return 0;
3907 RTVEC_ELT (v, i) = x;
3910 return gen_rtx_CONST_VECTOR (mode, v);
3913 if (VECTOR_MODE_P (mode)
3914 && code == VEC_CONCAT
3915 && (CONST_SCALAR_INT_P (op0)
3916 || GET_CODE (op0) == CONST_FIXED
3917 || CONST_DOUBLE_AS_FLOAT_P (op0))
3918 && (CONST_SCALAR_INT_P (op1)
3919 || CONST_DOUBLE_AS_FLOAT_P (op1)
3920 || GET_CODE (op1) == CONST_FIXED))
3922 unsigned n_elts = GET_MODE_NUNITS (mode);
3923 rtvec v = rtvec_alloc (n_elts);
3925 gcc_assert (n_elts >= 2);
3926 if (n_elts == 2)
3928 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3929 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3931 RTVEC_ELT (v, 0) = op0;
3932 RTVEC_ELT (v, 1) = op1;
3934 else
3936 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3937 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3938 unsigned i;
3940 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3941 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3942 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3944 for (i = 0; i < op0_n_elts; ++i)
3945 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3946 for (i = 0; i < op1_n_elts; ++i)
3947 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3950 return gen_rtx_CONST_VECTOR (mode, v);
3953 if (SCALAR_FLOAT_MODE_P (mode)
3954 && CONST_DOUBLE_AS_FLOAT_P (op0)
3955 && CONST_DOUBLE_AS_FLOAT_P (op1)
3956 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3958 if (code == AND
3959 || code == IOR
3960 || code == XOR)
3962 long tmp0[4];
3963 long tmp1[4];
3964 REAL_VALUE_TYPE r;
3965 int i;
3967 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3968 GET_MODE (op0));
3969 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3970 GET_MODE (op1));
3971 for (i = 0; i < 4; i++)
3973 switch (code)
3975 case AND:
3976 tmp0[i] &= tmp1[i];
3977 break;
3978 case IOR:
3979 tmp0[i] |= tmp1[i];
3980 break;
3981 case XOR:
3982 tmp0[i] ^= tmp1[i];
3983 break;
3984 default:
3985 gcc_unreachable ();
3988 real_from_target (&r, tmp0, mode);
3989 return const_double_from_real_value (r, mode);
3991 else
3993 REAL_VALUE_TYPE f0, f1, value, result;
3994 const REAL_VALUE_TYPE *opr0, *opr1;
3995 bool inexact;
3997 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3998 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4000 if (HONOR_SNANS (mode)
4001 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4002 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4003 return 0;
4005 real_convert (&f0, mode, opr0);
4006 real_convert (&f1, mode, opr1);
4008 if (code == DIV
4009 && real_equal (&f1, &dconst0)
4010 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4011 return 0;
4013 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4014 && flag_trapping_math
4015 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4017 int s0 = REAL_VALUE_NEGATIVE (f0);
4018 int s1 = REAL_VALUE_NEGATIVE (f1);
4020 switch (code)
4022 case PLUS:
4023 /* Inf + -Inf = NaN plus exception. */
4024 if (s0 != s1)
4025 return 0;
4026 break;
4027 case MINUS:
4028 /* Inf - Inf = NaN plus exception. */
4029 if (s0 == s1)
4030 return 0;
4031 break;
4032 case DIV:
4033 /* Inf / Inf = NaN plus exception. */
4034 return 0;
4035 default:
4036 break;
4040 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4041 && flag_trapping_math
4042 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4043 || (REAL_VALUE_ISINF (f1)
4044 && real_equal (&f0, &dconst0))))
4045 /* Inf * 0 = NaN plus exception. */
4046 return 0;
4048 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4049 &f0, &f1);
4050 real_convert (&result, mode, &value);
4052 /* Don't constant fold this floating point operation if
4053 the result has overflowed and flag_trapping_math is set. */
4055 if (flag_trapping_math
4056 && MODE_HAS_INFINITIES (mode)
4057 && REAL_VALUE_ISINF (result)
4058 && !REAL_VALUE_ISINF (f0)
4059 && !REAL_VALUE_ISINF (f1))
4060 /* Overflow plus exception. */
4061 return 0;
4063 /* Don't constant fold this floating point operation if the
4064 result may depend upon the run-time rounding mode and
4065 flag_rounding_math is set, or if GCC's software emulation
4066 is unable to accurately represent the result. */
4068 if ((flag_rounding_math
4069 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4070 && (inexact || !real_identical (&result, &value)))
4071 return NULL_RTX;
4073 return const_double_from_real_value (result, mode);
4077 /* We can fold some multi-word operations. */
4078 if ((GET_MODE_CLASS (mode) == MODE_INT
4079 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4080 && CONST_SCALAR_INT_P (op0)
4081 && CONST_SCALAR_INT_P (op1))
4083 wide_int result;
4084 bool overflow;
4085 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4086 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4088 #if TARGET_SUPPORTS_WIDE_INT == 0
4089 /* This assert keeps the simplification from producing a result
4090 that cannot be represented in a CONST_DOUBLE but a lot of
4091 upstream callers expect that this function never fails to
4092 simplify something, and so if you added this to the test
4093 above, the code would die later anyway. If this assert
4094 happens, you just need to make the port support wide int. */
4095 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4096 #endif
4097 switch (code)
4099 case MINUS:
4100 result = wi::sub (pop0, pop1);
4101 break;
4103 case PLUS:
4104 result = wi::add (pop0, pop1);
4105 break;
4107 case MULT:
4108 result = wi::mul (pop0, pop1);
4109 break;
4111 case DIV:
4112 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4113 if (overflow)
4114 return NULL_RTX;
4115 break;
4117 case MOD:
4118 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4119 if (overflow)
4120 return NULL_RTX;
4121 break;
4123 case UDIV:
4124 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4125 if (overflow)
4126 return NULL_RTX;
4127 break;
4129 case UMOD:
4130 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4131 if (overflow)
4132 return NULL_RTX;
4133 break;
4135 case AND:
4136 result = wi::bit_and (pop0, pop1);
4137 break;
4139 case IOR:
4140 result = wi::bit_or (pop0, pop1);
4141 break;
4143 case XOR:
4144 result = wi::bit_xor (pop0, pop1);
4145 break;
4147 case SMIN:
4148 result = wi::smin (pop0, pop1);
4149 break;
4151 case SMAX:
4152 result = wi::smax (pop0, pop1);
4153 break;
4155 case UMIN:
4156 result = wi::umin (pop0, pop1);
4157 break;
4159 case UMAX:
4160 result = wi::umax (pop0, pop1);
4161 break;
4163 case LSHIFTRT:
4164 case ASHIFTRT:
4165 case ASHIFT:
4167 wide_int wop1 = pop1;
4168 if (SHIFT_COUNT_TRUNCATED)
4169 wop1 = wi::umod_trunc (wop1, width);
4170 else if (wi::geu_p (wop1, width))
4171 return NULL_RTX;
4173 switch (code)
4175 case LSHIFTRT:
4176 result = wi::lrshift (pop0, wop1);
4177 break;
4179 case ASHIFTRT:
4180 result = wi::arshift (pop0, wop1);
4181 break;
4183 case ASHIFT:
4184 result = wi::lshift (pop0, wop1);
4185 break;
4187 default:
4188 gcc_unreachable ();
4190 break;
4192 case ROTATE:
4193 case ROTATERT:
4195 if (wi::neg_p (pop1))
4196 return NULL_RTX;
4198 switch (code)
4200 case ROTATE:
4201 result = wi::lrotate (pop0, pop1);
4202 break;
4204 case ROTATERT:
4205 result = wi::rrotate (pop0, pop1);
4206 break;
4208 default:
4209 gcc_unreachable ();
4211 break;
4213 default:
4214 return NULL_RTX;
4216 return immed_wide_int_const (result, mode);
4219 return NULL_RTX;
4224 /* Return a positive integer if X should sort after Y. The value
4225 returned is 1 if and only if X and Y are both regs. */
4227 static int
4228 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4230 int result;
4232 result = (commutative_operand_precedence (y)
4233 - commutative_operand_precedence (x));
4234 if (result)
4235 return result + result;
4237 /* Group together equal REGs to do more simplification. */
4238 if (REG_P (x) && REG_P (y))
4239 return REGNO (x) > REGNO (y);
4241 return 0;
4244 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4245 operands may be another PLUS or MINUS.
4247 Rather than test for specific cases, we do this by a brute-force method
4248 and do all possible simplifications until no more changes occur. Then
4249 we rebuild the operation.
4251 May return NULL_RTX when no changes were made. */
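/* For example, (plus (plus r1 r2) (neg r1)) is flattened into the ops
   array as { +r1, +r2, -r1 }; the r1 terms cancel in the pairwise
   simplification loop and the rebuilt result is just r2. */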
4253 static rtx
4254 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4255 rtx op1)
4257 struct simplify_plus_minus_op_data
4259 rtx op;
4260 short neg;
4261 } ops[16];
4262 rtx result, tem;
4263 int n_ops = 2;
4264 int changed, n_constants, canonicalized = 0;
4265 int i, j;
4267 memset (ops, 0, sizeof ops);
4269 /* Set up the two operands and then expand them until nothing has been
4270 changed. If we run out of room in our array, give up; this should
4271 almost never happen. */
4273 ops[0].op = op0;
4274 ops[0].neg = 0;
4275 ops[1].op = op1;
4276 ops[1].neg = (code == MINUS);
4280 changed = 0;
4281 n_constants = 0;
4283 for (i = 0; i < n_ops; i++)
4285 rtx this_op = ops[i].op;
4286 int this_neg = ops[i].neg;
4287 enum rtx_code this_code = GET_CODE (this_op);
4289 switch (this_code)
4291 case PLUS:
4292 case MINUS:
4293 if (n_ops == ARRAY_SIZE (ops))
4294 return NULL_RTX;
4296 ops[n_ops].op = XEXP (this_op, 1);
4297 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4298 n_ops++;
4300 ops[i].op = XEXP (this_op, 0);
4301 changed = 1;
4302 /* If this operand was negated then we will potentially
4303 canonicalize the expression. Similarly, if we don't
4304 place the operands adjacent, we're re-ordering the
4305 expression and thus might be performing a
4306 canonicalization. Ignore register re-ordering.
4307 ??? It might be better to shuffle the ops array here,
4308 but then (plus (plus (A, B), plus (C, D))) wouldn't
4309 be seen as non-canonical. */
4310 if (this_neg
4311 || (i != n_ops - 2
4312 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4313 canonicalized = 1;
4314 break;
4316 case NEG:
4317 ops[i].op = XEXP (this_op, 0);
4318 ops[i].neg = ! this_neg;
4319 changed = 1;
4320 canonicalized = 1;
4321 break;
4323 case CONST:
4324 if (n_ops != ARRAY_SIZE (ops)
4325 && GET_CODE (XEXP (this_op, 0)) == PLUS
4326 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4327 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4329 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4330 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4331 ops[n_ops].neg = this_neg;
4332 n_ops++;
4333 changed = 1;
4334 canonicalized = 1;
4336 break;
4338 case NOT:
4339 /* ~a -> (-a - 1) */
4340 if (n_ops != ARRAY_SIZE (ops))
4342 ops[n_ops].op = CONSTM1_RTX (mode);
4343 ops[n_ops++].neg = this_neg;
4344 ops[i].op = XEXP (this_op, 0);
4345 ops[i].neg = !this_neg;
4346 changed = 1;
4347 canonicalized = 1;
4349 break;
4351 case CONST_INT:
4352 n_constants++;
4353 if (this_neg)
4355 ops[i].op = neg_const_int (mode, this_op);
4356 ops[i].neg = 0;
4357 changed = 1;
4358 canonicalized = 1;
4360 break;
4362 default:
4363 break;
4367 while (changed);
4369 if (n_constants > 1)
4370 canonicalized = 1;
4372 gcc_assert (n_ops >= 2);
4374 /* If we only have two operands, we can avoid the loops. */
4375 if (n_ops == 2)
4377 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4378 rtx lhs, rhs;
4380 /* Get the two operands. Be careful with the order, especially for
4381 the cases where code == MINUS. */
4382 if (ops[0].neg && ops[1].neg)
4384 lhs = gen_rtx_NEG (mode, ops[0].op);
4385 rhs = ops[1].op;
4387 else if (ops[0].neg)
4389 lhs = ops[1].op;
4390 rhs = ops[0].op;
4392 else
4394 lhs = ops[0].op;
4395 rhs = ops[1].op;
4398 return simplify_const_binary_operation (code, mode, lhs, rhs);
4401 /* Now simplify each pair of operands until nothing changes. */
4402 while (1)
4404 /* Insertion sort is good enough for a small array. */
4405 for (i = 1; i < n_ops; i++)
4407 struct simplify_plus_minus_op_data save;
4408 int cmp;
4410 j = i - 1;
4411 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4412 if (cmp <= 0)
4413 continue;
4414 /* Just swapping registers doesn't count as canonicalization. */
4415 if (cmp != 1)
4416 canonicalized = 1;
4418 save = ops[i];
4420 ops[j + 1] = ops[j];
4421 while (j--
4422 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4423 ops[j + 1] = save;
4426 changed = 0;
4427 for (i = n_ops - 1; i > 0; i--)
4428 for (j = i - 1; j >= 0; j--)
4430 rtx lhs = ops[j].op, rhs = ops[i].op;
4431 int lneg = ops[j].neg, rneg = ops[i].neg;
4433 if (lhs != 0 && rhs != 0)
4435 enum rtx_code ncode = PLUS;
4437 if (lneg != rneg)
4439 ncode = MINUS;
4440 if (lneg)
4441 std::swap (lhs, rhs);
4443 else if (swap_commutative_operands_p (lhs, rhs))
4444 std::swap (lhs, rhs);
4446 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4447 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4449 rtx tem_lhs, tem_rhs;
4451 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4452 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4453 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4454 tem_rhs);
4456 if (tem && !CONSTANT_P (tem))
4457 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4459 else
4460 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4462 if (tem)
4464 /* Reject "simplifications" that just wrap the two
4465 arguments in a CONST. Failure to do so can result
4466 in infinite recursion with simplify_binary_operation
4467 when it calls us to simplify CONST operations.
4468 Also, if we find such a simplification, don't try
4469 any more combinations with this rhs: We must have
4470 something like symbol+offset, i.e. one of the
4471 trivial CONST expressions we handle later. */
4472 if (GET_CODE (tem) == CONST
4473 && GET_CODE (XEXP (tem, 0)) == ncode
4474 && XEXP (XEXP (tem, 0), 0) == lhs
4475 && XEXP (XEXP (tem, 0), 1) == rhs)
4476 break;
4477 lneg &= rneg;
4478 if (GET_CODE (tem) == NEG)
4479 tem = XEXP (tem, 0), lneg = !lneg;
4480 if (CONST_INT_P (tem) && lneg)
4481 tem = neg_const_int (mode, tem), lneg = 0;
4483 ops[i].op = tem;
4484 ops[i].neg = lneg;
4485 ops[j].op = NULL_RTX;
4486 changed = 1;
4487 canonicalized = 1;
4492 if (!changed)
4493 break;
4495 /* Pack all the operands to the lower-numbered entries. */
4496 for (i = 0, j = 0; j < n_ops; j++)
4497 if (ops[j].op)
4499 ops[i] = ops[j];
4500 i++;
4502 n_ops = i;
4505 /* If nothing changed, check that rematerialization of rtl instructions
4506 is still required. */
4507 if (!canonicalized)
4509 /* Perform rematerialization only if all operands are registers and
4510 all operations are PLUS. */
4511 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4512 around rs6000 and how it uses the CA register. See PR67145. */
4513 for (i = 0; i < n_ops; i++)
4514 if (ops[i].neg
4515 || !REG_P (ops[i].op)
4516 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4517 && fixed_regs[REGNO (ops[i].op)]
4518 && !global_regs[REGNO (ops[i].op)]
4519 && ops[i].op != frame_pointer_rtx
4520 && ops[i].op != arg_pointer_rtx
4521 && ops[i].op != stack_pointer_rtx))
4522 return NULL_RTX;
4523 goto gen_result;
4526 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4527 if (n_ops == 2
4528 && CONST_INT_P (ops[1].op)
4529 && CONSTANT_P (ops[0].op)
4530 && ops[0].neg)
4531 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4533 /* We suppressed creation of trivial CONST expressions in the
4534 combination loop to avoid recursion. Create one manually now.
4535 The combination loop should have ensured that there is exactly
4536 one CONST_INT, and the sort will have ensured that it is last
4537 in the array and that any other constant will be next-to-last. */
4539 if (n_ops > 1
4540 && CONST_INT_P (ops[n_ops - 1].op)
4541 && CONSTANT_P (ops[n_ops - 2].op))
4543 rtx value = ops[n_ops - 1].op;
4544 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4545 value = neg_const_int (mode, value);
4546 if (CONST_INT_P (value))
4548 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4549 INTVAL (value));
4550 n_ops--;
4554 /* Put a non-negated operand first, if possible. */
4556 for (i = 0; i < n_ops && ops[i].neg; i++)
4557 continue;
4558 if (i == n_ops)
4559 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4560 else if (i != 0)
4562 tem = ops[0].op;
4563 ops[0] = ops[i];
4564 ops[i].op = tem;
4565 ops[i].neg = 1;
4568 /* Now make the result by performing the requested operations. */
4569 gen_result:
4570 result = ops[0].op;
4571 for (i = 1; i < n_ops; i++)
4572 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4573 mode, result, ops[i].op);
4575 return result;
4578 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4579 static bool
4580 plus_minus_operand_p (const_rtx x)
4582 return GET_CODE (x) == PLUS
4583 || GET_CODE (x) == MINUS
4584 || (GET_CODE (x) == CONST
4585 && GET_CODE (XEXP (x, 0)) == PLUS
4586 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4587 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4590 /* Like simplify_binary_operation except used for relational operators.
4591 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4592 not both be VOIDmode as well.
4594 CMP_MODE specifies the mode in which the comparison is done, so it is
4595 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4596 the operands or, if both are VOIDmode, the operands are compared in
4597 "infinite precision". */
4599 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4600 machine_mode cmp_mode, rtx op0, rtx op1)
4602 rtx tem, trueop0, trueop1;
4604 if (cmp_mode == VOIDmode)
4605 cmp_mode = GET_MODE (op0);
4606 if (cmp_mode == VOIDmode)
4607 cmp_mode = GET_MODE (op1);
4609 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4610 if (tem)
4612 if (SCALAR_FLOAT_MODE_P (mode))
4614 if (tem == const0_rtx)
4615 return CONST0_RTX (mode);
4616 #ifdef FLOAT_STORE_FLAG_VALUE
4618 REAL_VALUE_TYPE val;
4619 val = FLOAT_STORE_FLAG_VALUE (mode);
4620 return const_double_from_real_value (val, mode);
4622 #else
4623 return NULL_RTX;
4624 #endif
4626 if (VECTOR_MODE_P (mode))
4628 if (tem == const0_rtx)
4629 return CONST0_RTX (mode);
4630 #ifdef VECTOR_STORE_FLAG_VALUE
4632 int i, units;
4633 rtvec v;
4635 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4636 if (val == NULL_RTX)
4637 return NULL_RTX;
4638 if (val == const1_rtx)
4639 return CONST1_RTX (mode);
4641 units = GET_MODE_NUNITS (mode);
4642 v = rtvec_alloc (units);
4643 for (i = 0; i < units; i++)
4644 RTVEC_ELT (v, i) = val;
4645 return gen_rtx_raw_CONST_VECTOR (mode, v);
4647 #else
4648 return NULL_RTX;
4649 #endif
4652 return tem;
4655 /* For the following tests, ensure const0_rtx is op1. */
4656 if (swap_commutative_operands_p (op0, op1)
4657 || (op0 == const0_rtx && op1 != const0_rtx))
4658 std::swap (op0, op1), code = swap_condition (code);
4660 /* If op0 is a compare, extract the comparison arguments from it. */
4661 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4662 return simplify_gen_relational (code, mode, VOIDmode,
4663 XEXP (op0, 0), XEXP (op0, 1));
4665 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4666 || CC0_P (op0))
4667 return NULL_RTX;
4669 trueop0 = avoid_constant_pool_reference (op0);
4670 trueop1 = avoid_constant_pool_reference (op1);
4671 return simplify_relational_operation_1 (code, mode, cmp_mode,
4672 trueop0, trueop1);
4675 /* This part of simplify_relational_operation is only used when CMP_MODE
4676 is not in class MODE_CC (i.e. it is a real comparison).
4678 MODE is the mode of the result, while CMP_MODE specifies the mode
4679 in which the comparison is done, so it is the mode of the operands. */
4681 static rtx
4682 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4683 machine_mode cmp_mode, rtx op0, rtx op1)
4685 enum rtx_code op0code = GET_CODE (op0);
4687 if (op1 == const0_rtx && COMPARISON_P (op0))
4689 /* If op0 is a comparison, extract the comparison arguments
4690 from it. */
4691 if (code == NE)
4693 if (GET_MODE (op0) == mode)
4694 return simplify_rtx (op0);
4695 else
4696 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4697 XEXP (op0, 0), XEXP (op0, 1));
4699 else if (code == EQ)
4701 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4702 if (new_code != UNKNOWN)
4703 return simplify_gen_relational (new_code, mode, VOIDmode,
4704 XEXP (op0, 0), XEXP (op0, 1));
4708 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4709 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4710 if ((code == LTU || code == GEU)
4711 && GET_CODE (op0) == PLUS
4712 && CONST_INT_P (XEXP (op0, 1))
4713 && (rtx_equal_p (op1, XEXP (op0, 0))
4714 || rtx_equal_p (op1, XEXP (op0, 1)))
4715 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4716 && XEXP (op0, 1) != const0_rtx)
4718 rtx new_cmp
4719 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4720 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4721 cmp_mode, XEXP (op0, 0), new_cmp);
4724 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4725 transformed into (LTU a -C). */
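/* For example, (gtu (plus a (const_int 3)) (const_int 2)) becomes
   (ltu a (const_int -3)). */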
4726 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4727 && CONST_INT_P (XEXP (op0, 1))
4728 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4729 && XEXP (op0, 1) != const0_rtx)
4731 rtx new_cmp
4732 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4733 return simplify_gen_relational (LTU, mode, cmp_mode,
4734 XEXP (op0, 0), new_cmp);
4737 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4738 if ((code == LTU || code == GEU)
4739 && GET_CODE (op0) == PLUS
4740 && rtx_equal_p (op1, XEXP (op0, 1))
4741 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4742 && !rtx_equal_p (op1, XEXP (op0, 0)))
4743 return simplify_gen_relational (code, mode, cmp_mode, op0,
4744 copy_rtx (XEXP (op0, 0)));
4746 if (op1 == const0_rtx)
4748 /* Canonicalize (GTU x 0) as (NE x 0). */
4749 if (code == GTU)
4750 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4751 /* Canonicalize (LEU x 0) as (EQ x 0). */
4752 if (code == LEU)
4753 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4755 else if (op1 == const1_rtx)
4757 switch (code)
4759 case GE:
4760 /* Canonicalize (GE x 1) as (GT x 0). */
4761 return simplify_gen_relational (GT, mode, cmp_mode,
4762 op0, const0_rtx);
4763 case GEU:
4764 /* Canonicalize (GEU x 1) as (NE x 0). */
4765 return simplify_gen_relational (NE, mode, cmp_mode,
4766 op0, const0_rtx);
4767 case LT:
4768 /* Canonicalize (LT x 1) as (LE x 0). */
4769 return simplify_gen_relational (LE, mode, cmp_mode,
4770 op0, const0_rtx);
4771 case LTU:
4772 /* Canonicalize (LTU x 1) as (EQ x 0). */
4773 return simplify_gen_relational (EQ, mode, cmp_mode,
4774 op0, const0_rtx);
4775 default:
4776 break;
4779 else if (op1 == constm1_rtx)
4781 /* Canonicalize (LE x -1) as (LT x 0). */
4782 if (code == LE)
4783 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4784 /* Canonicalize (GT x -1) as (GE x 0). */
4785 if (code == GT)
4786 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4789 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4790 if ((code == EQ || code == NE)
4791 && (op0code == PLUS || op0code == MINUS)
4792 && CONSTANT_P (op1)
4793 && CONSTANT_P (XEXP (op0, 1))
4794 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4796 rtx x = XEXP (op0, 0);
4797 rtx c = XEXP (op0, 1);
4798 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4799 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4801 /* Detect infinite recursion, where we oscillate at this
4802 simplification case between:
4803 A + B == C <---> C - B == A,
4804 where A, B, and C are all non-simplifiable constant expressions,
4805 usually SYMBOL_REFs. */
4806 if (GET_CODE (tem) == invcode
4807 && CONSTANT_P (x)
4808 && rtx_equal_p (c, XEXP (tem, 1)))
4809 return NULL_RTX;
4811 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4814 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4815 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4816 if (code == NE
4817 && op1 == const0_rtx
4818 && GET_MODE_CLASS (mode) == MODE_INT
4819 && cmp_mode != VOIDmode
4820 /* ??? Work-around BImode bugs in the ia64 backend. */
4821 && mode != BImode
4822 && cmp_mode != BImode
4823 && nonzero_bits (op0, cmp_mode) == 1
4824 && STORE_FLAG_VALUE == 1)
4825 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4826 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4827 : lowpart_subreg (mode, op0, cmp_mode);
4829 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4830 if ((code == EQ || code == NE)
4831 && op1 == const0_rtx
4832 && op0code == XOR)
4833 return simplify_gen_relational (code, mode, cmp_mode,
4834 XEXP (op0, 0), XEXP (op0, 1));
4836 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4837 if ((code == EQ || code == NE)
4838 && op0code == XOR
4839 && rtx_equal_p (XEXP (op0, 0), op1)
4840 && !side_effects_p (XEXP (op0, 0)))
4841 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4842 CONST0_RTX (mode));
4844 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4845 if ((code == EQ || code == NE)
4846 && op0code == XOR
4847 && rtx_equal_p (XEXP (op0, 1), op1)
4848 && !side_effects_p (XEXP (op0, 1)))
4849 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4850 CONST0_RTX (mode));
4852 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4853 if ((code == EQ || code == NE)
4854 && op0code == XOR
4855 && CONST_SCALAR_INT_P (op1)
4856 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4857 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4858 simplify_gen_binary (XOR, cmp_mode,
4859 XEXP (op0, 1), op1));
4861 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4862 can be implemented with a BICS instruction on some targets, or
4863 constant-folded if y is a constant. */
4864 if ((code == EQ || code == NE)
4865 && op0code == AND
4866 && rtx_equal_p (XEXP (op0, 0), op1)
4867 && !side_effects_p (op1)
4868 && op1 != CONST0_RTX (cmp_mode))
4870 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4871 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4873 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4874 CONST0_RTX (cmp_mode));
4877 /* Likewise for (eq/ne (and x y) y). */
4878 if ((code == EQ || code == NE)
4879 && op0code == AND
4880 && rtx_equal_p (XEXP (op0, 1), op1)
4881 && !side_effects_p (op1)
4882 && op1 != CONST0_RTX (cmp_mode))
4884 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4885 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4887 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4888 CONST0_RTX (cmp_mode));
4891 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4892 if ((code == EQ || code == NE)
4893 && GET_CODE (op0) == BSWAP
4894 && CONST_SCALAR_INT_P (op1))
4895 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4896 simplify_gen_unary (BSWAP, cmp_mode,
4897 op1, cmp_mode));
4899 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4900 if ((code == EQ || code == NE)
4901 && GET_CODE (op0) == BSWAP
4902 && GET_CODE (op1) == BSWAP)
4903 return simplify_gen_relational (code, mode, cmp_mode,
4904 XEXP (op0, 0), XEXP (op1, 0));
4906 if (op0code == POPCOUNT && op1 == const0_rtx)
4907 switch (code)
4909 case EQ:
4910 case LE:
4911 case LEU:
4912 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4913 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4914 XEXP (op0, 0), const0_rtx);
4916 case NE:
4917 case GT:
4918 case GTU:
4919 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4920 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4921 XEXP (op0, 0), const0_rtx);
4923 default:
4924 break;
4927 return NULL_RTX;
4930 enum
4932 CMP_EQ = 1,
4933 CMP_LT = 2,
4934 CMP_GT = 4,
4935 CMP_LTU = 8,
4936 CMP_GTU = 16
4940 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4941 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4942 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4943 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4944 For floating-point comparisons, assume that the operands were ordered. */
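/* For example, comparison_result (LEU, CMP_GT | CMP_GTU) is const0_rtx,
   since the operands are known to compare GTU and therefore not LEU. */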
4946 static rtx
4947 comparison_result (enum rtx_code code, int known_results)
4949 switch (code)
4951 case EQ:
4952 case UNEQ:
4953 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4954 case NE:
4955 case LTGT:
4956 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4958 case LT:
4959 case UNLT:
4960 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4961 case GE:
4962 case UNGE:
4963 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4965 case GT:
4966 case UNGT:
4967 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4968 case LE:
4969 case UNLE:
4970 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4972 case LTU:
4973 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4974 case GEU:
4975 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4977 case GTU:
4978 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4979 case LEU:
4980 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4982 case ORDERED:
4983 return const_true_rtx;
4984 case UNORDERED:
4985 return const0_rtx;
4986 default:
4987 gcc_unreachable ();
4991 /* Check if the given comparison (done in the given MODE) is actually
4992 a tautology or a contradiction. If the mode is VOIDmode, the
4993 comparison is done in "infinite precision". If no simplification
4994 is possible, this function returns zero. Otherwise, it returns
4995 either const_true_rtx or const0_rtx. */
4998 simplify_const_relational_operation (enum rtx_code code,
4999 machine_mode mode,
5000 rtx op0, rtx op1)
5002 rtx tem;
5003 rtx trueop0;
5004 rtx trueop1;
5006 gcc_assert (mode != VOIDmode
5007 || (GET_MODE (op0) == VOIDmode
5008 && GET_MODE (op1) == VOIDmode));
5010 /* If op0 is a compare, extract the comparison arguments from it. */
5011 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5013 op1 = XEXP (op0, 1);
5014 op0 = XEXP (op0, 0);
5016 if (GET_MODE (op0) != VOIDmode)
5017 mode = GET_MODE (op0);
5018 else if (GET_MODE (op1) != VOIDmode)
5019 mode = GET_MODE (op1);
5020 else
5021 return 0;
5024 /* We can't simplify MODE_CC values since we don't know what the
5025 actual comparison is. */
5026 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5027 return 0;
5029 /* Make sure the constant is second. */
5030 if (swap_commutative_operands_p (op0, op1))
5032 std::swap (op0, op1);
5033 code = swap_condition (code);
5036 trueop0 = avoid_constant_pool_reference (op0);
5037 trueop1 = avoid_constant_pool_reference (op1);
5039 /* For integer comparisons of A and B maybe we can simplify A - B and can
5040 then simplify a comparison of that with zero. If A and B are both either
5041 a register or a CONST_INT, this can't help; testing for these cases will
5042 prevent infinite recursion here and speed things up.
5044 We can only do this for EQ and NE comparisons as otherwise we may
5045 lose or introduce overflow, which we cannot disregard as undefined, since
5046 we do not know the signedness of the operation on either the left or
5047 the right hand side of the comparison. */
5049 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5050 && (code == EQ || code == NE)
5051 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5052 && (REG_P (op1) || CONST_INT_P (trueop1)))
5053 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5054 /* We cannot do this if tem is a nonzero address. */
5055 && ! nonzero_address_p (tem))
5056 return simplify_const_relational_operation (signed_condition (code),
5057 mode, tem, const0_rtx);
5059 if (! HONOR_NANS (mode) && code == ORDERED)
5060 return const_true_rtx;
5062 if (! HONOR_NANS (mode) && code == UNORDERED)
5063 return const0_rtx;
5065 /* For modes without NaNs, if the two operands are equal, we know the
5066 result except if they have side-effects. Even with NaNs we know
5067 the result of unordered comparisons and, if signaling NaNs are
5068 irrelevant, also the result of LT/GT/LTGT. */
5069 if ((! HONOR_NANS (trueop0)
5070 || code == UNEQ || code == UNLE || code == UNGE
5071 || ((code == LT || code == GT || code == LTGT)
5072 && ! HONOR_SNANS (trueop0)))
5073 && rtx_equal_p (trueop0, trueop1)
5074 && ! side_effects_p (trueop0))
5075 return comparison_result (code, CMP_EQ);
5077 /* If the operands are floating-point constants, see if we can fold
5078 the result. */
5079 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5080 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5081 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5083 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5084 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5086 /* Comparisons are unordered iff at least one of the values is NaN. */
5087 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5088 switch (code)
5090 case UNEQ:
5091 case UNLT:
5092 case UNGT:
5093 case UNLE:
5094 case UNGE:
5095 case NE:
5096 case UNORDERED:
5097 return const_true_rtx;
5098 case EQ:
5099 case LT:
5100 case GT:
5101 case LE:
5102 case GE:
5103 case LTGT:
5104 case ORDERED:
5105 return const0_rtx;
5106 default:
5107 return 0;
5110 return comparison_result (code,
5111 (real_equal (d0, d1) ? CMP_EQ :
5112 real_less (d0, d1) ? CMP_LT : CMP_GT));
5115 /* Otherwise, see if the operands are both integers. */
5116 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5117 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5119 /* It would be nice if we really had a mode here. However, the
5120 largest int representable on the target is as good as
5121 infinite. */
5122 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5123 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5124 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5126 if (wi::eq_p (ptrueop0, ptrueop1))
5127 return comparison_result (code, CMP_EQ);
5128 else
5130 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5131 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5132 return comparison_result (code, cr);
5136 /* Optimize comparisons with upper and lower bounds. */
5137 if (HWI_COMPUTABLE_MODE_P (mode)
5138 && CONST_INT_P (trueop1)
5139 && !side_effects_p (trueop0))
5141 int sign;
5142 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5143 HOST_WIDE_INT val = INTVAL (trueop1);
5144 HOST_WIDE_INT mmin, mmax;
5146 if (code == GEU
5147 || code == LEU
5148 || code == GTU
5149 || code == LTU)
5150 sign = 0;
5151 else
5152 sign = 1;
5154 /* Get a reduced range if the sign bit is zero. */
5155 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5157 mmin = 0;
5158 mmax = nonzero;
5160 else
5162 rtx mmin_rtx, mmax_rtx;
5163 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5165 mmin = INTVAL (mmin_rtx);
5166 mmax = INTVAL (mmax_rtx);
5167 if (sign)
5169 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5171 mmin >>= (sign_copies - 1);
5172 mmax >>= (sign_copies - 1);
5176 switch (code)
5178 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5179 case GEU:
5180 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5181 return const_true_rtx;
5182 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5183 return const0_rtx;
5184 break;
5185 case GE:
5186 if (val <= mmin)
5187 return const_true_rtx;
5188 if (val > mmax)
5189 return const0_rtx;
5190 break;
5192 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5193 case LEU:
5194 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5195 return const_true_rtx;
5196 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5197 return const0_rtx;
5198 break;
5199 case LE:
5200 if (val >= mmax)
5201 return const_true_rtx;
5202 if (val < mmin)
5203 return const0_rtx;
5204 break;
5206 case EQ:
5207 /* x == y is always false for y out of range. */
5208 if (val < mmin || val > mmax)
5209 return const0_rtx;
5210 break;
5212 /* x > y is always false for y >= mmax, always true for y < mmin. */
5213 case GTU:
5214 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5215 return const0_rtx;
5216 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5217 return const_true_rtx;
5218 break;
5219 case GT:
5220 if (val >= mmax)
5221 return const0_rtx;
5222 if (val < mmin)
5223 return const_true_rtx;
5224 break;
5226 /* x < y is always false for y <= mmin, always true for y > mmax. */
5227 case LTU:
5228 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5229 return const0_rtx;
5230 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5231 return const_true_rtx;
5232 break;
5233 case LT:
5234 if (val <= mmin)
5235 return const0_rtx;
5236 if (val > mmax)
5237 return const_true_rtx;
5238 break;
5240 case NE:
5241 /* x != y is always true for y out of range. */
5242 if (val < mmin || val > mmax)
5243 return const_true_rtx;
5244 break;
5246 default:
5247 break;
5251 /* Optimize integer comparisons with zero. */
5252 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5254 /* Some addresses are known to be nonzero. We don't know
5255 their sign, but equality comparisons are known. */
5256 if (nonzero_address_p (trueop0))
5258 if (code == EQ || code == LEU)
5259 return const0_rtx;
5260 if (code == NE || code == GTU)
5261 return const_true_rtx;
5264 /* See if the first operand is an IOR with a constant. If so, we
5265 may be able to determine the result of this comparison. */
5266 if (GET_CODE (op0) == IOR)
5268 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5269 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5271 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5272 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5273 && (UINTVAL (inner_const)
5274 & (HOST_WIDE_INT_1U
5275 << sign_bitnum)));
5277 switch (code)
5279 case EQ:
5280 case LEU:
5281 return const0_rtx;
5282 case NE:
5283 case GTU:
5284 return const_true_rtx;
5285 case LT:
5286 case LE:
5287 if (has_sign)
5288 return const_true_rtx;
5289 break;
5290 case GT:
5291 case GE:
5292 if (has_sign)
5293 return const0_rtx;
5294 break;
5295 default:
5296 break;
5302 /* Optimize comparison of ABS with zero. */
5303 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5304 && (GET_CODE (trueop0) == ABS
5305 || (GET_CODE (trueop0) == FLOAT_EXTEND
5306 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5308 switch (code)
5310 case LT:
5311 /* Optimize abs(x) < 0.0. */
5312 if (!HONOR_SNANS (mode)
5313 && (!INTEGRAL_MODE_P (mode)
5314 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5316 if (INTEGRAL_MODE_P (mode)
5317 && (issue_strict_overflow_warning
5318 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5319 warning (OPT_Wstrict_overflow,
5320 ("assuming signed overflow does not occur when "
5321 "assuming abs (x) < 0 is false"));
5322 return const0_rtx;
5324 break;
5326 case GE:
5327 /* Optimize abs(x) >= 0.0. */
5328 if (!HONOR_NANS (mode)
5329 && (!INTEGRAL_MODE_P (mode)
5330 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5332 if (INTEGRAL_MODE_P (mode)
5333 && (issue_strict_overflow_warning
5334 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5335 warning (OPT_Wstrict_overflow,
5336 ("assuming signed overflow does not occur when "
5337 "assuming abs (x) >= 0 is true"));
5338 return const_true_rtx;
5340 break;
5342 case UNGE:
5343 /* Optimize ! (abs(x) < 0.0). */
5344 return const_true_rtx;
5346 default:
5347 break;
5351 return 0;
5354 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5355 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5356 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the expression
5357 can be simplified to that, or NULL_RTX if not.
5358 Assume X is compared against zero with CMP_CODE and the true
5359 arm is TRUE_VAL and the false arm is FALSE_VAL. */
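/* For example, (if_then_else (eq x (const_int 0)) (const_int 32)
   (clz:SI x)) simplifies to (clz:SI x) on a target whose
   CLZ_DEFINED_VALUE_AT_ZERO gives 32 for SImode. */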
5361 static rtx
5362 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5364 if (cmp_code != EQ && cmp_code != NE)
5365 return NULL_RTX;
5367 /* Result for X == 0 and X != 0 respectively. */
5368 rtx on_zero, on_nonzero;
5369 if (cmp_code == EQ)
5371 on_zero = true_val;
5372 on_nonzero = false_val;
5374 else
5376 on_zero = false_val;
5377 on_nonzero = true_val;
5380 rtx_code op_code = GET_CODE (on_nonzero);
5381 if ((op_code != CLZ && op_code != CTZ)
5382 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5383 || !CONST_INT_P (on_zero))
5384 return NULL_RTX;
5386 HOST_WIDE_INT op_val;
5387 if (((op_code == CLZ
5388 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5389 || (op_code == CTZ
5390 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5391 && op_val == INTVAL (on_zero))
5392 return on_nonzero;
5394 return NULL_RTX;
5398 /* Simplify CODE, an operation with result mode MODE and three operands,
5399 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5400 a constant. Return 0 if no simplification is possible. */
5403 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5404 machine_mode op0_mode, rtx op0, rtx op1,
5405 rtx op2)
5407 unsigned int width = GET_MODE_PRECISION (mode);
5408 bool any_change = false;
5409 rtx tem, trueop2;
5411 /* VOIDmode means "infinite" precision. */
5412 if (width == 0)
5413 width = HOST_BITS_PER_WIDE_INT;
5415 switch (code)
5417 case FMA:
5418 /* Simplify negations around the multiplication. */
5419 /* -a * -b + c => a * b + c. */
5420 if (GET_CODE (op0) == NEG)
5422 tem = simplify_unary_operation (NEG, mode, op1, mode);
5423 if (tem)
5424 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5426 else if (GET_CODE (op1) == NEG)
5428 tem = simplify_unary_operation (NEG, mode, op0, mode);
5429 if (tem)
5430 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5433 /* Canonicalize the two multiplication operands. */
5434 /* a * -b + c => -b * a + c. */
5435 if (swap_commutative_operands_p (op0, op1))
5436 std::swap (op0, op1), any_change = true;
5438 if (any_change)
5439 return gen_rtx_FMA (mode, op0, op1, op2);
5440 return NULL_RTX;
5442 case SIGN_EXTRACT:
5443 case ZERO_EXTRACT:
5444 if (CONST_INT_P (op0)
5445 && CONST_INT_P (op1)
5446 && CONST_INT_P (op2)
5447 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5448 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5450 /* Extracting a bit-field from a constant */
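/* For example, with BITS_BIG_ENDIAN clear,
   (zero_extract:SI (const_int 0xf0) (const_int 4) (const_int 4))
   folds to (const_int 15). */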
5451 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5452 HOST_WIDE_INT op1val = INTVAL (op1);
5453 HOST_WIDE_INT op2val = INTVAL (op2);
5454 if (BITS_BIG_ENDIAN)
5455 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5456 else
5457 val >>= op2val;
5459 if (HOST_BITS_PER_WIDE_INT != op1val)
5461 /* First zero-extend. */
5462 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5463 /* If desired, propagate sign bit. */
5464 if (code == SIGN_EXTRACT
5465 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5466 != 0)
5467 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5470 return gen_int_mode (val, mode);
5472 break;
5474 case IF_THEN_ELSE:
5475 if (CONST_INT_P (op0))
5476 return op0 != const0_rtx ? op1 : op2;
5478 /* Convert c ? a : a into "a". */
5479 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5480 return op1;
5482 /* Convert a != b ? a : b into "a". */
5483 if (GET_CODE (op0) == NE
5484 && ! side_effects_p (op0)
5485 && ! HONOR_NANS (mode)
5486 && ! HONOR_SIGNED_ZEROS (mode)
5487 && ((rtx_equal_p (XEXP (op0, 0), op1)
5488 && rtx_equal_p (XEXP (op0, 1), op2))
5489 || (rtx_equal_p (XEXP (op0, 0), op2)
5490 && rtx_equal_p (XEXP (op0, 1), op1))))
5491 return op1;
5493 /* Convert a == b ? a : b into "b". */
5494 if (GET_CODE (op0) == EQ
5495 && ! side_effects_p (op0)
5496 && ! HONOR_NANS (mode)
5497 && ! HONOR_SIGNED_ZEROS (mode)
5498 && ((rtx_equal_p (XEXP (op0, 0), op1)
5499 && rtx_equal_p (XEXP (op0, 1), op2))
5500 || (rtx_equal_p (XEXP (op0, 0), op2)
5501 && rtx_equal_p (XEXP (op0, 1), op1))))
5502 return op2;
5504 /* Convert (!c) != {0,...,0} ? a : b into
5505 c != {0,...,0} ? b : a for vector modes. */
5506 if (VECTOR_MODE_P (GET_MODE (op1))
5507 && GET_CODE (op0) == NE
5508 && GET_CODE (XEXP (op0, 0)) == NOT
5509 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5511 rtx cv = XEXP (op0, 1);
5512 int nunits = CONST_VECTOR_NUNITS (cv);
5513 bool ok = true;
5514 for (int i = 0; i < nunits; ++i)
5515 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5517 ok = false;
5518 break;
5520 if (ok)
5522 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5523 XEXP (XEXP (op0, 0), 0),
5524 XEXP (op0, 1));
5525 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5526 return retval;
5530 /* Convert x == 0 ? N : clz (x) into clz (x) when
5531 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5532 Similarly for ctz (x). */
5533 if (COMPARISON_P (op0) && !side_effects_p (op0)
5534 && XEXP (op0, 1) == const0_rtx)
5536 rtx simplified
5537 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5538 op1, op2);
5539 if (simplified)
5540 return simplified;
5543 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5545 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5546 ? GET_MODE (XEXP (op0, 1))
5547 : GET_MODE (XEXP (op0, 0)));
5548 rtx temp;
5550 /* Look for happy constants in op1 and op2. */
5551 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5553 HOST_WIDE_INT t = INTVAL (op1);
5554 HOST_WIDE_INT f = INTVAL (op2);
5556 if (t == STORE_FLAG_VALUE && f == 0)
5557 code = GET_CODE (op0);
5558 else if (t == 0 && f == STORE_FLAG_VALUE)
5560 enum rtx_code tmp;
5561 tmp = reversed_comparison_code (op0, NULL);
5562 if (tmp == UNKNOWN)
5563 break;
5564 code = tmp;
5566 else
5567 break;
5569 return simplify_gen_relational (code, mode, cmp_mode,
5570 XEXP (op0, 0), XEXP (op0, 1));
5573 if (cmp_mode == VOIDmode)
5574 cmp_mode = op0_mode;
5575 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5576 cmp_mode, XEXP (op0, 0),
5577 XEXP (op0, 1));
5579 /* See if any simplifications were possible. */
5580 if (temp)
5582 if (CONST_INT_P (temp))
5583 return temp == const0_rtx ? op2 : op1;
5584 else if (temp)
5585 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5588 break;
5590 case VEC_MERGE:
5591 gcc_assert (GET_MODE (op0) == mode);
5592 gcc_assert (GET_MODE (op1) == mode);
5593 gcc_assert (VECTOR_MODE_P (mode));
5594 trueop2 = avoid_constant_pool_reference (op2);
5595 if (CONST_INT_P (trueop2))
5597 int elt_size = GET_MODE_UNIT_SIZE (mode);
5598 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5599 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5600 unsigned HOST_WIDE_INT mask;
5601 if (n_elts == HOST_BITS_PER_WIDE_INT)
5602 mask = -1;
5603 else
5604 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5606 if (!(sel & mask) && !side_effects_p (op0))
5607 return op1;
5608 if ((sel & mask) == mask && !side_effects_p (op1))
5609 return op0;
5611 rtx trueop0 = avoid_constant_pool_reference (op0);
5612 rtx trueop1 = avoid_constant_pool_reference (op1);
5613 if (GET_CODE (trueop0) == CONST_VECTOR
5614 && GET_CODE (trueop1) == CONST_VECTOR)
5616 rtvec v = rtvec_alloc (n_elts);
5617 unsigned int i;
5619 for (i = 0; i < n_elts; i++)
5620 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5621 ? CONST_VECTOR_ELT (trueop0, i)
5622 : CONST_VECTOR_ELT (trueop1, i));
5623 return gen_rtx_CONST_VECTOR (mode, v);
5626 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5627 if no element from a appears in the result. */
5628 if (GET_CODE (op0) == VEC_MERGE)
5630 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5631 if (CONST_INT_P (tem))
5633 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5634 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5635 return simplify_gen_ternary (code, mode, mode,
5636 XEXP (op0, 1), op1, op2);
5637 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5638 return simplify_gen_ternary (code, mode, mode,
5639 XEXP (op0, 0), op1, op2);
5642 if (GET_CODE (op1) == VEC_MERGE)
5644 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5645 if (CONST_INT_P (tem))
5647 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5648 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5649 return simplify_gen_ternary (code, mode, mode,
5650 op0, XEXP (op1, 1), op2);
5651 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5652 return simplify_gen_ternary (code, mode, mode,
5653 op0, XEXP (op1, 0), op2);
5657 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5658 with a. */
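       /* For instance, (vec_merge (vec_duplicate (vec_select a
	  (parallel [(const_int 2)]))) a (const_int 4)) merely reinserts
	  element 2 of A into itself, so the whole expression is A.  */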
5659 if (GET_CODE (op0) == VEC_DUPLICATE
5660 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5661 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5662 	  && GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))) == 1)
5664 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5665 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5667 if (XEXP (XEXP (op0, 0), 0) == op1
5668 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5669 return op1;
5674 if (rtx_equal_p (op0, op1)
5675 && !side_effects_p (op2) && !side_effects_p (op1))
5676 return op0;
5678 break;
5680 default:
5681 gcc_unreachable ();
5684 return 0;
5687 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5688 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5689 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5691 Works by unpacking OP into a collection of 8-bit values
5692 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5693 and then repacking them again for OUTERMODE. */
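 /* For example, on a little-endian target, taking the HImode subreg at
    byte 0 of the SImode constant 0x12345678 unpacks the byte values
    78 56 34 12 and repacks the first two of them, giving (const_int 0x5678).  */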
5695 static rtx
5696 simplify_immed_subreg (machine_mode outermode, rtx op,
5697 machine_mode innermode, unsigned int byte)
5699 enum {
5700 value_bit = 8,
5701 value_mask = (1 << value_bit) - 1
5703 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5704 int value_start;
5705 int i;
5706 int elem;
5708 int num_elem;
5709 rtx * elems;
5710 int elem_bitsize;
5711 rtx result_s;
5712 rtvec result_v = NULL;
5713 enum mode_class outer_class;
5714 machine_mode outer_submode;
5715 int max_bitsize;
5717 /* Some ports misuse CCmode. */
5718 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5719 return op;
5721 /* We have no way to represent a complex constant at the rtl level. */
5722 if (COMPLEX_MODE_P (outermode))
5723 return NULL_RTX;
5725 /* We support any size mode. */
5726 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5727 GET_MODE_BITSIZE (innermode));
5729 /* Unpack the value. */
5731 if (GET_CODE (op) == CONST_VECTOR)
5733 num_elem = CONST_VECTOR_NUNITS (op);
5734 elems = &CONST_VECTOR_ELT (op, 0);
5735 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5737 else
5739 num_elem = 1;
5740 elems = &op;
5741 elem_bitsize = max_bitsize;
5743 /* If this asserts, it is too complicated; reducing value_bit may help. */
5744 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5745 /* I don't know how to handle endianness of sub-units. */
5746 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5748 for (elem = 0; elem < num_elem; elem++)
5750 unsigned char * vp;
5751 rtx el = elems[elem];
5753 /* Vectors are kept in target memory order. (This is probably
5754 a mistake.) */
5756 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5757 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5758 / BITS_PER_UNIT);
5759 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5760 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5761 unsigned bytele = (subword_byte % UNITS_PER_WORD
5762 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5763 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5766 switch (GET_CODE (el))
5768 case CONST_INT:
5769 for (i = 0;
5770 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5771 i += value_bit)
5772 *vp++ = INTVAL (el) >> i;
5773 /* CONST_INTs are always logically sign-extended. */
5774 for (; i < elem_bitsize; i += value_bit)
5775 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5776 break;
5778 case CONST_WIDE_INT:
5780 rtx_mode_t val = rtx_mode_t (el, innermode);
5781 unsigned char extend = wi::sign_mask (val);
5782 int prec = wi::get_precision (val);
5784 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5785 *vp++ = wi::extract_uhwi (val, i, value_bit);
5786 for (; i < elem_bitsize; i += value_bit)
5787 *vp++ = extend;
5789 break;
5791 case CONST_DOUBLE:
5792 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5794 unsigned char extend = 0;
5795 /* If this triggers, someone should have generated a
5796 CONST_INT instead. */
5797 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5799 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5800 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5801 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5803 *vp++
5804 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5805 i += value_bit;
5808 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5809 extend = -1;
5810 for (; i < elem_bitsize; i += value_bit)
5811 *vp++ = extend;
5813 else
5815 /* This is big enough for anything on the platform. */
5816 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5817 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5819 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5820 gcc_assert (bitsize <= elem_bitsize);
5821 gcc_assert (bitsize % value_bit == 0);
5823 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5824 GET_MODE (el));
5826 /* real_to_target produces its result in words affected by
5827 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5828 and use WORDS_BIG_ENDIAN instead; see the documentation
5829 of SUBREG in rtl.texi. */
5830 for (i = 0; i < bitsize; i += value_bit)
5832 int ibase;
5833 if (WORDS_BIG_ENDIAN)
5834 ibase = bitsize - 1 - i;
5835 else
5836 ibase = i;
5837 *vp++ = tmp[ibase / 32] >> i % 32;
5840 /* It shouldn't matter what's done here, so fill it with
5841 zero. */
5842 for (; i < elem_bitsize; i += value_bit)
5843 *vp++ = 0;
5845 break;
5847 case CONST_FIXED:
5848 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5850 for (i = 0; i < elem_bitsize; i += value_bit)
5851 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5853 else
5855 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5856 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5857 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5858 i += value_bit)
5859 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5860 >> (i - HOST_BITS_PER_WIDE_INT);
5861 for (; i < elem_bitsize; i += value_bit)
5862 *vp++ = 0;
5864 break;
5866 default:
5867 gcc_unreachable ();
5871 /* Now, pick the right byte to start with. */
5872 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5873 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5874 will already have offset 0. */
5875 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5877 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5878 - byte);
5879 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5880 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5881 byte = (subword_byte % UNITS_PER_WORD
5882 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
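   /* For example, with a DImode inner value and an SImode subreg at byte 4
      on a fully big-endian target, IBYTE is 8 - 4 - 4 == 0, so BYTE is
      renumbered to 0: that subreg names the least significant word.  */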
5885 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5886 so if it's become negative it will instead be very large.) */
5887 gcc_assert (byte < GET_MODE_SIZE (innermode));
5889 /* Convert from bytes to chunks of size value_bit. */
5890 value_start = byte * (BITS_PER_UNIT / value_bit);
5892 /* Re-pack the value. */
5893 num_elem = GET_MODE_NUNITS (outermode);
5895 if (VECTOR_MODE_P (outermode))
5897 result_v = rtvec_alloc (num_elem);
5898 elems = &RTVEC_ELT (result_v, 0);
5900 else
5901 elems = &result_s;
5903 outer_submode = GET_MODE_INNER (outermode);
5904 outer_class = GET_MODE_CLASS (outer_submode);
5905 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5907 gcc_assert (elem_bitsize % value_bit == 0);
5908 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5910 for (elem = 0; elem < num_elem; elem++)
5912 unsigned char *vp;
5914 /* Vectors are stored in target memory order. (This is probably
5915 a mistake.) */
5917 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5918 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5919 / BITS_PER_UNIT);
5920 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5921 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5922 unsigned bytele = (subword_byte % UNITS_PER_WORD
5923 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5924 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5927 switch (outer_class)
5929 case MODE_INT:
5930 case MODE_PARTIAL_INT:
5932 int u;
5933 int base = 0;
5934 int units
5935 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5936 / HOST_BITS_PER_WIDE_INT;
5937 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5938 wide_int r;
5940 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5941 return NULL_RTX;
5942 for (u = 0; u < units; u++)
5944 unsigned HOST_WIDE_INT buf = 0;
5945 for (i = 0;
5946 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5947 i += value_bit)
5948 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5950 tmp[u] = buf;
5951 base += HOST_BITS_PER_WIDE_INT;
5953 r = wide_int::from_array (tmp, units,
5954 GET_MODE_PRECISION (outer_submode));
5955 #if TARGET_SUPPORTS_WIDE_INT == 0
5956 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5957 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5958 return NULL_RTX;
5959 #endif
5960 elems[elem] = immed_wide_int_const (r, outer_submode);
5962 break;
5964 case MODE_FLOAT:
5965 case MODE_DECIMAL_FLOAT:
5967 REAL_VALUE_TYPE r;
5968 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5970 /* real_from_target wants its input in words affected by
5971 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5972 and use WORDS_BIG_ENDIAN instead; see the documentation
5973 of SUBREG in rtl.texi. */
5974 for (i = 0; i < elem_bitsize; i += value_bit)
5976 int ibase;
5977 if (WORDS_BIG_ENDIAN)
5978 ibase = elem_bitsize - 1 - i;
5979 else
5980 ibase = i;
5981 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5984 real_from_target (&r, tmp, outer_submode);
5985 elems[elem] = const_double_from_real_value (r, outer_submode);
5987 break;
5989 case MODE_FRACT:
5990 case MODE_UFRACT:
5991 case MODE_ACCUM:
5992 case MODE_UACCUM:
5994 FIXED_VALUE_TYPE f;
5995 f.data.low = 0;
5996 f.data.high = 0;
5997 f.mode = outer_submode;
5999 for (i = 0;
6000 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6001 i += value_bit)
6002 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6003 for (; i < elem_bitsize; i += value_bit)
6004 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6005 << (i - HOST_BITS_PER_WIDE_INT));
6007 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6009 break;
6011 default:
6012 gcc_unreachable ();
6015 if (VECTOR_MODE_P (outermode))
6016 return gen_rtx_CONST_VECTOR (outermode, result_v);
6017 else
6018 return result_s;
6021 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6022 Return 0 if no simplifications are possible. */
6024 simplify_subreg (machine_mode outermode, rtx op,
6025 machine_mode innermode, unsigned int byte)
6027 /* Little bit of sanity checking. */
6028 gcc_assert (innermode != VOIDmode);
6029 gcc_assert (outermode != VOIDmode);
6030 gcc_assert (innermode != BLKmode);
6031 gcc_assert (outermode != BLKmode);
6033 gcc_assert (GET_MODE (op) == innermode
6034 || GET_MODE (op) == VOIDmode);
6036 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6037 return NULL_RTX;
6039 if (byte >= GET_MODE_SIZE (innermode))
6040 return NULL_RTX;
6042 if (outermode == innermode && !byte)
6043 return op;
6045 if (CONST_SCALAR_INT_P (op)
6046 || CONST_DOUBLE_AS_FLOAT_P (op)
6047 || GET_CODE (op) == CONST_FIXED
6048 || GET_CODE (op) == CONST_VECTOR)
6049 return simplify_immed_subreg (outermode, op, innermode, byte);
6051   /* Changing mode twice with SUBREG => just change it once,
6052      or not at all if changing back to the starting mode of OP.  */
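   /* For example, (subreg:QI (subreg:HI (reg:SI x) 0) 0) can become
      (subreg:QI (reg:SI x) 0) (offsets shown for a little-endian target),
      and when the outer mode matches the innermost register's mode the
      nested SUBREGs collapse back to the register itself.  */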
6053 if (GET_CODE (op) == SUBREG)
6055 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6056 int final_offset = byte + SUBREG_BYTE (op);
6057 rtx newx;
6059 if (outermode == innermostmode
6060 && byte == 0 && SUBREG_BYTE (op) == 0)
6061 return SUBREG_REG (op);
6063       /* The SUBREG_BYTE represents the offset, as if the value were stored
6064 	 in memory.  An irritating exception is the paradoxical subreg,
6065 	 where we define SUBREG_BYTE to be 0.  On big-endian machines this
6066 	 value ought to be negative.  For a moment, undo this exception.  */
6067 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6069 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6070 if (WORDS_BIG_ENDIAN)
6071 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6072 if (BYTES_BIG_ENDIAN)
6073 final_offset += difference % UNITS_PER_WORD;
6075 if (SUBREG_BYTE (op) == 0
6076 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6078 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6079 if (WORDS_BIG_ENDIAN)
6080 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6081 if (BYTES_BIG_ENDIAN)
6082 final_offset += difference % UNITS_PER_WORD;
6085 /* See whether resulting subreg will be paradoxical. */
6086 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
6088 /* In nonparadoxical subregs we can't handle negative offsets. */
6089 if (final_offset < 0)
6090 return NULL_RTX;
6091 /* Bail out in case resulting subreg would be incorrect. */
6092 if (final_offset % GET_MODE_SIZE (outermode)
6093 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6094 return NULL_RTX;
6096 else
6098 int offset = 0;
6099 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6101 	  /* For a paradoxical subreg, see if we are still looking at the
6102 	     lower part.  If so, our SUBREG_BYTE will be 0.  */
6103 if (WORDS_BIG_ENDIAN)
6104 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6105 if (BYTES_BIG_ENDIAN)
6106 offset += difference % UNITS_PER_WORD;
6107 if (offset == final_offset)
6108 final_offset = 0;
6109 else
6110 return NULL_RTX;
6113 /* Recurse for further possible simplifications. */
6114 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6115 final_offset);
6116 if (newx)
6117 return newx;
6118 if (validate_subreg (outermode, innermostmode,
6119 SUBREG_REG (op), final_offset))
6121 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6122 if (SUBREG_PROMOTED_VAR_P (op)
6123 && SUBREG_PROMOTED_SIGN (op) >= 0
6124 && GET_MODE_CLASS (outermode) == MODE_INT
6125 && IN_RANGE (GET_MODE_SIZE (outermode),
6126 GET_MODE_SIZE (innermode),
6127 GET_MODE_SIZE (innermostmode))
6128 && subreg_lowpart_p (newx))
6130 SUBREG_PROMOTED_VAR_P (newx) = 1;
6131 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6133 return newx;
6135 return NULL_RTX;
6138 /* SUBREG of a hard register => just change the register number
6139 and/or mode. If the hard register is not valid in that mode,
6140 suppress this simplification. If the hard register is the stack,
6141 frame, or argument pointer, leave this as a SUBREG. */
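  /* For example, (subreg:SI (reg:DI 0) 0) may become (reg:SI 0) outright
     when hard register 0 can hold SImode at that offset; if
     simplify_subreg_regno cannot map the offset to a valid hard register,
     the SUBREG is left alone.  */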
6143 if (REG_P (op) && HARD_REGISTER_P (op))
6145 unsigned int regno, final_regno;
6147 regno = REGNO (op);
6148 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6149 if (HARD_REGISTER_NUM_P (final_regno))
6151 rtx x;
6152 int final_offset = byte;
6154 /* Adjust offset for paradoxical subregs. */
6155 if (byte == 0
6156 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6158 int difference = (GET_MODE_SIZE (innermode)
6159 - GET_MODE_SIZE (outermode));
6160 if (WORDS_BIG_ENDIAN)
6161 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6162 if (BYTES_BIG_ENDIAN)
6163 final_offset += difference % UNITS_PER_WORD;
6166 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6168 	  /* Propagate the original regno.  We don't have any way to specify
6169 	     the offset inside the original regno, so do so only for the
6170 	     lowpart.  The information is used only by alias analysis,
6171 	     which cannot grok a partial register anyway.  */
6173 if (subreg_lowpart_offset (outermode, innermode) == byte)
6174 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6175 return x;
6179 /* If we have a SUBREG of a register that we are replacing and we are
6180 replacing it with a MEM, make a new MEM and try replacing the
6181 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6182 or if we would be widening it. */
6184 if (MEM_P (op)
6185 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6186       /* Allow splitting of volatile memory references in case we don't
6187 	 have an instruction to move the whole thing.  */
6188 && (! MEM_VOLATILE_P (op)
6189 || ! have_insn_for (SET, innermode))
6190 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6191 return adjust_address_nv (op, outermode, byte);
6193 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6194 of two parts. */
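  /* For example, (subreg:SF (concat:SC (reg:SF re) (reg:SF im)) 4) reads
     entirely from the second part and simplifies to (reg:SF im),
     assuming a 4-byte SFmode so that BYTE selects the imaginary half.  */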
6195 if (GET_CODE (op) == CONCAT
6196 || GET_CODE (op) == VEC_CONCAT)
6198 unsigned int part_size, final_offset;
6199 rtx part, res;
6201 enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
6202 if (part_mode == VOIDmode)
6203 part_mode = GET_MODE_INNER (GET_MODE (op));
6204 part_size = GET_MODE_SIZE (part_mode);
6205 if (byte < part_size)
6207 part = XEXP (op, 0);
6208 final_offset = byte;
6210 else
6212 part = XEXP (op, 1);
6213 final_offset = byte - part_size;
6216 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6217 return NULL_RTX;
6219 part_mode = GET_MODE (part);
6220 if (part_mode == VOIDmode)
6221 part_mode = GET_MODE_INNER (GET_MODE (op));
6222 res = simplify_subreg (outermode, part, part_mode, final_offset);
6223 if (res)
6224 return res;
6225 if (validate_subreg (outermode, part_mode, part, final_offset))
6226 return gen_rtx_SUBREG (outermode, part, final_offset);
6227 return NULL_RTX;
6230   /* A SUBREG resulting from a zero extension may fold to zero if
6231      it extracts higher bits than the ZERO_EXTEND's source provides.  */
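  /* For example, the SUBREG that extracts the high SImode half of
     (zero_extend:DI (reg:SI x)) covers only zero bits, so it folds
     to (const_int 0).  */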
6232 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6234 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6235 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6236 return CONST0_RTX (outermode);
6239 if (SCALAR_INT_MODE_P (outermode)
6240 && SCALAR_INT_MODE_P (innermode)
6241 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6242 && byte == subreg_lowpart_offset (outermode, innermode))
6244 rtx tem = simplify_truncation (outermode, op, innermode);
6245 if (tem)
6246 return tem;
6249 return NULL_RTX;
6252 /* Make a SUBREG operation or equivalent if it folds. */
6255 simplify_gen_subreg (machine_mode outermode, rtx op,
6256 machine_mode innermode, unsigned int byte)
6258 rtx newx;
6260 newx = simplify_subreg (outermode, op, innermode, byte);
6261 if (newx)
6262 return newx;
6264 if (GET_CODE (op) == SUBREG
6265 || GET_CODE (op) == CONCAT
6266 || GET_MODE (op) == VOIDmode)
6267 return NULL_RTX;
6269 if (validate_subreg (outermode, innermode, op, byte))
6270 return gen_rtx_SUBREG (outermode, op, byte);
6272 return NULL_RTX;
6275 /* Generate a subreg that extracts the least significant part of EXPR
6276    (which has mode INNER_MODE) in mode OUTER_MODE.  */
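/* For example, lowpart_subreg (QImode, x, SImode) yields the least
   significant byte of X: a folded constant when X is a CONST_INT,
   otherwise (subreg:QI x N) with N the target's lowpart byte offset
   (0 on little-endian, 3 on big-endian).  */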
6279 lowpart_subreg (machine_mode outer_mode, rtx expr,
6280 machine_mode inner_mode)
6282 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6283 subreg_lowpart_offset (outer_mode, inner_mode));
6286 /* Simplify X, an rtx expression.
6288 Return the simplified expression or NULL if no simplifications
6289 were possible.
6291 This is the preferred entry point into the simplification routines;
6292 however, we still allow passes to call the more specific routines.
6294 Right now GCC has three (yes, three) major bodies of RTL simplification
6295 code that need to be unified.
6297 1. fold_rtx in cse.c. This code uses various CSE specific
6298 information to aid in RTL simplification.
6300 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6301 it uses combine specific information to aid in RTL
6302 simplification.
6304 3. The routines in this file.
6307 Long term we want to only have one body of simplification code; to
6308 get to that state I recommend the following steps:
6310    1. Pore over fold_rtx & simplify_rtx and move any simplifications
6311       which do not depend on pass-specific state into these routines.
6313 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6314 use this routine whenever possible.
6316 3. Allow for pass dependent state to be provided to these
6317 routines and add simplifications based on the pass dependent
6318 state. Remove code from cse.c & combine.c that becomes
6319 redundant/dead.
6321 It will take time, but ultimately the compiler will be easier to
6322    maintain and improve.  It's totally silly that when we add a
6323    simplification it needs to be added to 4 places (3 for RTL
6324    simplification and 1 for tree simplification).  */
6327 simplify_rtx (const_rtx x)
6329 const enum rtx_code code = GET_CODE (x);
6330 const machine_mode mode = GET_MODE (x);
6332 switch (GET_RTX_CLASS (code))
6334 case RTX_UNARY:
6335 return simplify_unary_operation (code, mode,
6336 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6337 case RTX_COMM_ARITH:
6338 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6339 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6341 /* Fall through. */
6343 case RTX_BIN_ARITH:
6344 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6346 case RTX_TERNARY:
6347 case RTX_BITFIELD_OPS:
6348 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6349 XEXP (x, 0), XEXP (x, 1),
6350 XEXP (x, 2));
6352 case RTX_COMPARE:
6353 case RTX_COMM_COMPARE:
6354 return simplify_relational_operation (code, mode,
6355 ((GET_MODE (XEXP (x, 0))
6356 != VOIDmode)
6357 ? GET_MODE (XEXP (x, 0))
6358 : GET_MODE (XEXP (x, 1))),
6359 XEXP (x, 0),
6360 XEXP (x, 1));
6362 case RTX_EXTRA:
6363 if (code == SUBREG)
6364 return simplify_subreg (mode, SUBREG_REG (x),
6365 GET_MODE (SUBREG_REG (x)),
6366 SUBREG_BYTE (x));
6367 break;
6369 case RTX_OBJ:
6370 if (code == LO_SUM)
6372 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6373 if (GET_CODE (XEXP (x, 0)) == HIGH
6374 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6375 return XEXP (x, 1);
6377 break;
6379 default:
6380 break;
6382 return NULL;