gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
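/* For example, assuming a 64-bit HOST_WIDE_INT: if LOW has its top bit
   set (say 0x8000000000000000), HWI_SIGN_EXTEND (LOW) evaluates to
   (HOST_WIDE_INT) -1, while HWI_SIGN_EXTEND (1) evaluates to 0 -- in
   other words it yields the high word that sign-extends LOW.  */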
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
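/* As an illustration of the truncation above: the most negative QImode
   value is -128, whose mathematical negation (+128) does not fit in
   QImode; gen_int_mode truncates the result back to the mode, so
   negating a QImode -128 yields -128 again.  */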
66 /* Test whether expression, X, is an immediate constant that represents
67 the most significant bit of machine mode MODE. */
69 bool
70 mode_signbit_p (machine_mode mode, const_rtx x)
72 unsigned HOST_WIDE_INT val;
73 unsigned int width;
75 if (GET_MODE_CLASS (mode) != MODE_INT)
76 return false;
78 width = GET_MODE_PRECISION (mode);
79 if (width == 0)
80 return false;
82 if (width <= HOST_BITS_PER_WIDE_INT
83 && CONST_INT_P (x))
84 val = INTVAL (x);
85 #if TARGET_SUPPORTS_WIDE_INT
86 else if (CONST_WIDE_INT_P (x))
88 unsigned int i;
89 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
90 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
91 return false;
92 for (i = 0; i < elts - 1; i++)
93 if (CONST_WIDE_INT_ELT (x, i) != 0)
94 return false;
95 val = CONST_WIDE_INT_ELT (x, elts - 1);
96 width %= HOST_BITS_PER_WIDE_INT;
97 if (width == 0)
98 width = HOST_BITS_PER_WIDE_INT;
100 #else
101 else if (width <= HOST_BITS_PER_DOUBLE_INT
102 && CONST_DOUBLE_AS_INT_P (x)
103 && CONST_DOUBLE_LOW (x) == 0)
105 val = CONST_DOUBLE_HIGH (x);
106 width -= HOST_BITS_PER_WIDE_INT;
108 #endif
109 else
110 /* X is not an integer constant. */
111 return false;
113 if (width < HOST_BITS_PER_WIDE_INT)
114 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
115 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
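/* For instance, in SImode (32 bits of precision) the only constant this
   accepts is one equal to 1 << 31, i.e. with only the sign bit set,
   whether it arrives as a CONST_INT or, for modes wider than a
   HOST_WIDE_INT, in the CONST_WIDE_INT/CONST_DOUBLE form.  */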
118 /* Test whether VAL is equal to the most significant bit of mode MODE
119 (after masking with the mode mask of MODE). Returns false if the
120 precision of MODE is too large to handle. */
122 bool
123 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
125 unsigned int width;
127 if (GET_MODE_CLASS (mode) != MODE_INT)
128 return false;
130 width = GET_MODE_PRECISION (mode);
131 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
132 return false;
134 val &= GET_MODE_MASK (mode);
135 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
138 /* Test whether the most significant bit of mode MODE is set in VAL.
139 Returns false if the precision of MODE is too large to handle. */
140 bool
141 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
143 unsigned int width;
145 if (GET_MODE_CLASS (mode) != MODE_INT)
146 return false;
148 width = GET_MODE_PRECISION (mode);
149 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
150 return false;
152 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
153 return val != 0;
156 /* Test whether the most significant bit of mode MODE is clear in VAL.
157 Returns false if the precision of MODE is too large to handle. */
158 bool
159 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
161 unsigned int width;
163 if (GET_MODE_CLASS (mode) != MODE_INT)
164 return false;
166 width = GET_MODE_PRECISION (mode);
167 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
168 return false;
170 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
171 return val == 0;
174 /* Make a binary operation by properly ordering the operands and
175 seeing if the expression folds. */
178 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
179 rtx op1)
181 rtx tem;
183 /* If this simplifies, do it. */
184 tem = simplify_binary_operation (code, mode, op0, op1);
185 if (tem)
186 return tem;
188 /* Put complex operands first and constants second if commutative. */
189 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
190 && swap_commutative_operands_p (op0, op1))
191 std::swap (op0, op1);
193 return gen_rtx_fmt_ee (code, mode, op0, op1);
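/* A sketch of typical use (not taken from a caller in this file): a call
   such as simplify_gen_binary (PLUS, SImode, const1_rtx, reg) first tries
   to fold the addition; failing that, the commutative-operand swap above
   canonicalizes the constant into the second position, producing
   (plus:SI reg (const_int 1)).  */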
196 /* If X is a MEM referencing the constant pool, return the real value.
197 Otherwise return X. */
199 avoid_constant_pool_reference (rtx x)
201 rtx c, tmp, addr;
202 machine_mode cmode;
203 HOST_WIDE_INT offset = 0;
205 switch (GET_CODE (x))
207 case MEM:
208 break;
210 case FLOAT_EXTEND:
211 /* Handle float extensions of constant pool references. */
212 tmp = XEXP (x, 0);
213 c = avoid_constant_pool_reference (tmp);
214 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
215 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
216 GET_MODE (x));
217 return x;
219 default:
220 return x;
223 if (GET_MODE (x) == BLKmode)
224 return x;
226 addr = XEXP (x, 0);
228 /* Call target hook to avoid the effects of -fpic etc.... */
229 addr = targetm.delegitimize_address (addr);
231 /* Split the address into a base and integer offset. */
232 if (GET_CODE (addr) == CONST
233 && GET_CODE (XEXP (addr, 0)) == PLUS
234 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
236 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
237 addr = XEXP (XEXP (addr, 0), 0);
240 if (GET_CODE (addr) == LO_SUM)
241 addr = XEXP (addr, 1);
243 /* If this is a constant pool reference, we can turn it into its
244 constant and hope that simplifications happen. */
245 if (GET_CODE (addr) == SYMBOL_REF
246 && CONSTANT_POOL_ADDRESS_P (addr))
248 c = get_pool_constant (addr);
249 cmode = get_pool_mode (addr);
251 /* If we're accessing the constant in a different mode than it was
252 originally stored, attempt to fix that up via subreg simplifications.
253 If that fails we have no choice but to return the original memory. */
254 if ((offset != 0 || cmode != GET_MODE (x))
255 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
257 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
258 if (tem && CONSTANT_P (tem))
259 return tem;
261 else
262 return c;
265 return x;
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep, false);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && TREE_CODE (decl) == VAR_DECL
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 	   the same address.  We do so if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies mode comparison is done in. */
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419    X; if it returns non-NULL, replace X with its return value and simplify the
420 result. */
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
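/* A sketch of typical use (modes abbreviated for brevity): replacing
   (reg R) with (const_int 8) in (plus:SI (reg R) (const_int 4)) lets the
   recursive rebuild fold the result to (const_int 12), whereas a plain
   substitution would leave the addition unsimplified.  */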
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
676 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
677      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
678 the outer subreg is effectively a truncation to the original mode. */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
694      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
695 the outer subreg is effectively a truncation to the original mode. */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
706      (ashift:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
718 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
719 and C2. */
720 if (GET_CODE (op) == AND
721 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
722 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
723 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
724 && CONST_INT_P (XEXP (op, 1)))
726 rtx op0 = (XEXP (XEXP (op, 0), 0));
727 rtx shift_op = XEXP (XEXP (op, 0), 1);
728 rtx mask_op = XEXP (op, 1);
729 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
730 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 if (shift < precision
733 /* If doing this transform works for an X with all bits set,
734 it works for any X. */
735 && ((GET_MODE_MASK (mode) >> shift) & mask)
736 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
737 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
738 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
741 return simplify_gen_binary (AND, mode, op0, mask_op);
745 /* Recognize a word extraction from a multi-word subreg. */
746 if ((GET_CODE (op) == LSHIFTRT
747 || GET_CODE (op) == ASHIFTRT)
748 && SCALAR_INT_MODE_P (mode)
749 && SCALAR_INT_MODE_P (op_mode)
750 && precision >= BITS_PER_WORD
751 && 2 * precision <= op_precision
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
754 && UINTVAL (XEXP (op, 1)) < op_precision)
756 int byte = subreg_lowpart_offset (mode, op_mode);
757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
758 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
759 (WORDS_BIG_ENDIAN
760 ? byte - shifted_bytes
761 : byte + shifted_bytes));
764 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
765 and try replacing the TRUNCATE and shift with it. Don't do this
766 if the MEM has a mode-dependent address. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (op_mode)
770 && MEM_P (XEXP (op, 0))
771 && CONST_INT_P (XEXP (op, 1))
772 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
773 && INTVAL (XEXP (op, 1)) > 0
774 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
775 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
776 MEM_ADDR_SPACE (XEXP (op, 0)))
777 && ! MEM_VOLATILE_P (XEXP (op, 0))
778 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
779 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
781 int byte = subreg_lowpart_offset (mode, op_mode);
782 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
783 return adjust_address_nv (XEXP (op, 0), mode,
784 (WORDS_BIG_ENDIAN
785 ? byte - shifted_bytes
786 : byte + shifted_bytes));
789 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
790 (OP:SI foo:SI) if OP is NEG or ABS. */
791 if ((GET_CODE (op) == ABS
792 || GET_CODE (op) == NEG)
793 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
794 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
796 return simplify_gen_unary (GET_CODE (op), mode,
797 XEXP (XEXP (op, 0), 0), mode);
799 /* (truncate:A (subreg:B (truncate:C X) 0)) is
800 (truncate:A X). */
801 if (GET_CODE (op) == SUBREG
802 && SCALAR_INT_MODE_P (mode)
803 && SCALAR_INT_MODE_P (op_mode)
804 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
805 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
806 && subreg_lowpart_p (op))
808 rtx inner = XEXP (SUBREG_REG (op), 0);
809 if (GET_MODE_PRECISION (mode)
810 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
811 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
812 else
813 /* If subreg above is paradoxical and C is narrower
814 than A, return (subreg:A (truncate:C X) 0). */
815 return simplify_gen_subreg (mode, SUBREG_REG (op),
816 GET_MODE (SUBREG_REG (op)), 0);
819 /* (truncate:A (truncate:B X)) is (truncate:A X). */
820 if (GET_CODE (op) == TRUNCATE)
821 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 return NULL_RTX;
827 /* Try to simplify a unary operation CODE whose output mode is to be
828 MODE with input operand OP whose mode was originally OP_MODE.
829 Return zero if no simplification can be made. */
831 simplify_unary_operation (enum rtx_code code, machine_mode mode,
832 rtx op, machine_mode op_mode)
834 rtx trueop, tem;
836 trueop = avoid_constant_pool_reference (op);
838 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
839 if (tem)
840 return tem;
842 return simplify_unary_operation_1 (code, mode, op);
845 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
846 to be exact. */
848 static bool
849 exact_int_to_float_conversion_p (const_rtx op)
851 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
852 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
853 /* Constants shouldn't reach here. */
854 gcc_assert (op0_mode != VOIDmode);
855 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
856 int in_bits = in_prec;
857 if (HWI_COMPUTABLE_MODE_P (op0_mode))
859 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
860 if (GET_CODE (op) == FLOAT)
861 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
862 else if (GET_CODE (op) == UNSIGNED_FLOAT)
863 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
864 else
865 gcc_unreachable ();
866 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
868 return in_bits <= out_bits;
871 /* Perform some simplifications we can do even if the operands
872 aren't constant. */
873 static rtx
874 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
876 enum rtx_code reversed;
877 rtx temp;
879 switch (code)
881 case NOT:
882 /* (not (not X)) == X. */
883 if (GET_CODE (op) == NOT)
884 return XEXP (op, 0);
886 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
887 comparison is all ones. */
888 if (COMPARISON_P (op)
889 && (mode == BImode || STORE_FLAG_VALUE == -1)
890 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
891 return simplify_gen_relational (reversed, mode, VOIDmode,
892 XEXP (op, 0), XEXP (op, 1));
894 /* (not (plus X -1)) can become (neg X). */
895 if (GET_CODE (op) == PLUS
896 && XEXP (op, 1) == constm1_rtx)
897 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
899 /* Similarly, (not (neg X)) is (plus X -1). */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
902 CONSTM1_RTX (mode));
904 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
905 if (GET_CODE (op) == XOR
906 && CONST_INT_P (XEXP (op, 1))
907 && (temp = simplify_unary_operation (NOT, mode,
908 XEXP (op, 1), mode)) != 0)
909 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
911 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
912 if (GET_CODE (op) == PLUS
913 && CONST_INT_P (XEXP (op, 1))
914 && mode_signbit_p (mode, XEXP (op, 1))
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1), mode)) != 0)
917 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
924 bother with. */
925 if (GET_CODE (op) == ASHIFT
926 && XEXP (op, 0) == const1_rtx)
928 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
929 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
933 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
934 so we can perform the above simplification. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_CODE (op) == ASHIFTRT
937 && CONST_INT_P (XEXP (op, 1))
938 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
939 return simplify_gen_relational (GE, mode, VOIDmode,
940 XEXP (op, 0), const0_rtx);
943 if (GET_CODE (op) == SUBREG
944 && subreg_lowpart_p (op)
945 && (GET_MODE_SIZE (GET_MODE (op))
946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
947 && GET_CODE (SUBREG_REG (op)) == ASHIFT
948 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
950 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
951 rtx x;
953 x = gen_rtx_ROTATE (inner_mode,
954 simplify_gen_unary (NOT, inner_mode, const1_rtx,
955 inner_mode),
956 XEXP (SUBREG_REG (op), 1));
957 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
958 if (temp)
959 return temp;
962 /* Apply De Morgan's laws to reduce number of patterns for machines
963 with negating logical insns (and-not, nand, etc.). If result has
964 only one NOT, put it first, since that is how the patterns are
965 coded. */
966 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
968 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
969 machine_mode op_mode;
971 op_mode = GET_MODE (in1);
972 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
974 op_mode = GET_MODE (in2);
975 if (op_mode == VOIDmode)
976 op_mode = mode;
977 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
979 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
980 std::swap (in1, in2);
982 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
983 mode, in1, in2);
986 /* (not (bswap x)) -> (bswap (not x)). */
987 if (GET_CODE (op) == BSWAP)
989 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990 return simplify_gen_unary (BSWAP, mode, x, mode);
992 break;
994 case NEG:
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
997 return XEXP (op, 0);
999 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1000 If comparison is not reversible use
1001 x ? y : (neg y). */
1002 if (GET_CODE (op) == IF_THEN_ELSE)
1004 rtx cond = XEXP (op, 0);
1005 rtx true_rtx = XEXP (op, 1);
1006 rtx false_rtx = XEXP (op, 2);
1008 if ((GET_CODE (true_rtx) == NEG
1009 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1010 || (GET_CODE (false_rtx) == NEG
1011 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1013 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1014 temp = reversed_comparison (cond, mode);
1015 else
1017 temp = cond;
1018 std::swap (true_rtx, false_rtx);
1020 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1021 mode, temp, true_rtx, false_rtx);
1025 /* (neg (plus X 1)) can become (not X). */
1026 if (GET_CODE (op) == PLUS
1027 && XEXP (op, 1) == const1_rtx)
1028 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1030 /* Similarly, (neg (not X)) is (plus X 1). */
1031 if (GET_CODE (op) == NOT)
1032 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1033 CONST1_RTX (mode));
1035 /* (neg (minus X Y)) can become (minus Y X). This transformation
1036 isn't safe for modes with signed zeros, since if X and Y are
1037 both +0, (minus Y X) is the same as (minus X Y). If the
1038 rounding mode is towards +infinity (or -infinity) then the two
1039 expressions will be rounded differently. */
1040 if (GET_CODE (op) == MINUS
1041 && !HONOR_SIGNED_ZEROS (mode)
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1045 if (GET_CODE (op) == PLUS
1046 && !HONOR_SIGNED_ZEROS (mode)
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1049 /* (neg (plus A C)) is simplified to (minus -C A). */
1050 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1051 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1054 if (temp)
1055 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1058 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1059 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1060 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1063 /* (neg (mult A B)) becomes (mult A (neg B)).
1064 This works even for floating-point values. */
1065 if (GET_CODE (op) == MULT
1066 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1069 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1072 /* NEG commutes with ASHIFT since it is multiplication. Only do
1073 this if we can then eliminate the NEG (e.g., if the operand
1074 is a constant). */
1075 if (GET_CODE (op) == ASHIFT)
1077 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1078 if (temp)
1079 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1082 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1083 C is equal to the width of MODE minus 1. */
1084 if (GET_CODE (op) == ASHIFTRT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1087 return simplify_gen_binary (LSHIFTRT, mode,
1088 XEXP (op, 0), XEXP (op, 1));
1090 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1091 C is equal to the width of MODE minus 1. */
1092 if (GET_CODE (op) == LSHIFTRT
1093 && CONST_INT_P (XEXP (op, 1))
1094 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1095 return simplify_gen_binary (ASHIFTRT, mode,
1096 XEXP (op, 0), XEXP (op, 1));
1098 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1099 if (GET_CODE (op) == XOR
1100 && XEXP (op, 1) == const1_rtx
1101 && nonzero_bits (XEXP (op, 0), mode) == 1)
1102 return plus_constant (mode, XEXP (op, 0), -1);
1104 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1105 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1106 if (GET_CODE (op) == LT
1107 && XEXP (op, 1) == const0_rtx
1108 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1110 machine_mode inner = GET_MODE (XEXP (op, 0));
1111 int isize = GET_MODE_PRECISION (inner);
1112 if (STORE_FLAG_VALUE == 1)
1114 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1115 GEN_INT (isize - 1));
1116 if (mode == inner)
1117 return temp;
1118 if (GET_MODE_PRECISION (mode) > isize)
1119 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1120 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1122 else if (STORE_FLAG_VALUE == -1)
1124 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1125 GEN_INT (isize - 1));
1126 if (mode == inner)
1127 return temp;
1128 if (GET_MODE_PRECISION (mode) > isize)
1129 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1130 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1133 break;
1135 case TRUNCATE:
1136 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1137 with the umulXi3_highpart patterns. */
1138 if (GET_CODE (op) == LSHIFTRT
1139 && GET_CODE (XEXP (op, 0)) == MULT)
1140 break;
1142 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1144 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1146 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1147 if (temp)
1148 return temp;
1150 /* We can't handle truncation to a partial integer mode here
1151 because we don't know the real bitsize of the partial
1152 integer mode. */
1153 break;
1156 if (GET_MODE (op) != VOIDmode)
1158 temp = simplify_truncation (mode, op, GET_MODE (op));
1159 if (temp)
1160 return temp;
1163 /* If we know that the value is already truncated, we can
1164 replace the TRUNCATE with a SUBREG. */
1165 if (GET_MODE_NUNITS (mode) == 1
1166 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1167 || truncated_to_mode (mode, op)))
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a comparison can be replaced with a subreg if
1175 STORE_FLAG_VALUE permits. This is like the previous test,
1176 but it works even if the comparison is done in a mode larger
1177 than HOST_BITS_PER_WIDE_INT. */
1178 if (HWI_COMPUTABLE_MODE_P (mode)
1179 && COMPARISON_P (op)
1180 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1187 /* A truncate of a memory is just loading the low part of the memory
1188 if we are not changing the meaning of the address. */
1189 if (GET_CODE (op) == MEM
1190 && !VECTOR_MODE_P (mode)
1191 && !MEM_VOLATILE_P (op)
1192 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1194 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1195 if (temp)
1196 return temp;
1199 break;
1201 case FLOAT_TRUNCATE:
1202 if (DECIMAL_FLOAT_MODE_P (mode))
1203 break;
1205 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1206 if (GET_CODE (op) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (op, 0)) == mode)
1208 return XEXP (op, 0);
1210 /* (float_truncate:SF (float_truncate:DF foo:XF))
1211 = (float_truncate:SF foo:XF).
1212 This may eliminate double rounding, so it is unsafe.
1214 (float_truncate:SF (float_extend:XF foo:DF))
1215 = (float_truncate:SF foo:DF).
1217 (float_truncate:DF (float_extend:XF foo:SF))
1218 = (float_extend:DF foo:SF). */
1219 if ((GET_CODE (op) == FLOAT_TRUNCATE
1220 && flag_unsafe_math_optimizations)
1221 || GET_CODE (op) == FLOAT_EXTEND)
1222 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1223 0)))
1224 > GET_MODE_SIZE (mode)
1225 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1226 mode,
1227 XEXP (op, 0), mode);
1229 /* (float_truncate (float x)) is (float x) */
1230 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1231 && (flag_unsafe_math_optimizations
1232 || exact_int_to_float_conversion_p (op)))
1233 return simplify_gen_unary (GET_CODE (op), mode,
1234 XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1237 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1238 (OP:SF foo:SF) if OP is NEG or ABS. */
1239 if ((GET_CODE (op) == ABS
1240 || GET_CODE (op) == NEG)
1241 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1242 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1243 return simplify_gen_unary (GET_CODE (op), mode,
1244 XEXP (XEXP (op, 0), 0), mode);
1246 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1247 is (float_truncate:SF x). */
1248 if (GET_CODE (op) == SUBREG
1249 && subreg_lowpart_p (op)
1250 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1251 return SUBREG_REG (op);
1252 break;
1254 case FLOAT_EXTEND:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_extend (float_extend x)) is (float_extend x)
1260 (float_extend (float x)) is (float x) assuming that double
1261 	  rounding can't happen.
1262 	  */
1263 if (GET_CODE (op) == FLOAT_EXTEND
1264 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1265 && exact_int_to_float_conversion_p (op)))
1266 return simplify_gen_unary (GET_CODE (op), mode,
1267 XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case ABS:
1273 /* (abs (neg <foo>)) -> (abs <foo>) */
1274 if (GET_CODE (op) == NEG)
1275 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1279 do nothing. */
1280 if (GET_MODE (op) == VOIDmode)
1281 break;
1283 /* If operand is something known to be positive, ignore the ABS. */
1284 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1285 || val_signbit_known_clear_p (GET_MODE (op),
1286 nonzero_bits (op, GET_MODE (op))))
1287 return op;
1289 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1290 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1291 return gen_rtx_NEG (mode, op);
1293 break;
1295 case FFS:
1296 /* (ffs (*_extend <X>)) = (ffs <X>) */
1297 if (GET_CODE (op) == SIGN_EXTEND
1298 || GET_CODE (op) == ZERO_EXTEND)
1299 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 case POPCOUNT:
1304 switch (GET_CODE (op))
1306 case BSWAP:
1307 case ZERO_EXTEND:
1308 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1309 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1312 case ROTATE:
1313 case ROTATERT:
1314 /* Rotations don't affect popcount. */
1315 if (!side_effects_p (XEXP (op, 1)))
1316 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318 break;
1320 default:
1321 break;
1323 break;
1325 case PARITY:
1326 switch (GET_CODE (op))
1328 case NOT:
1329 case BSWAP:
1330 case ZERO_EXTEND:
1331 case SIGN_EXTEND:
1332 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1333 GET_MODE (XEXP (op, 0)));
1335 case ROTATE:
1336 case ROTATERT:
1337 /* Rotations don't affect parity. */
1338 if (!side_effects_p (XEXP (op, 1)))
1339 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 break;
1343 default:
1344 break;
1346 break;
1348 case BSWAP:
1349 /* (bswap (bswap x)) -> x. */
1350 if (GET_CODE (op) == BSWAP)
1351 return XEXP (op, 0);
1352 break;
1354 case FLOAT:
1355 /* (float (sign_extend <X>)) = (float <X>). */
1356 if (GET_CODE (op) == SIGN_EXTEND)
1357 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1358 GET_MODE (XEXP (op, 0)));
1359 break;
1361 case SIGN_EXTEND:
1362 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1363 becomes just the MINUS if its mode is MODE. This allows
1364 folding switch statements on machines using casesi (such as
1365 the VAX). */
1366 if (GET_CODE (op) == TRUNCATE
1367 && GET_MODE (XEXP (op, 0)) == mode
1368 && GET_CODE (XEXP (op, 0)) == MINUS
1369 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1370 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1371 return XEXP (op, 0);
1373 /* Extending a widening multiplication should be canonicalized to
1374 a wider widening multiplication. */
1375 if (GET_CODE (op) == MULT)
1377 rtx lhs = XEXP (op, 0);
1378 rtx rhs = XEXP (op, 1);
1379 enum rtx_code lcode = GET_CODE (lhs);
1380 enum rtx_code rcode = GET_CODE (rhs);
1382 /* Widening multiplies usually extend both operands, but sometimes
1383 they use a shift to extract a portion of a register. */
1384 if ((lcode == SIGN_EXTEND
1385 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1386 && (rcode == SIGN_EXTEND
1387 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1389 machine_mode lmode = GET_MODE (lhs);
1390 machine_mode rmode = GET_MODE (rhs);
1391 int bits;
1393 if (lcode == ASHIFTRT)
1394 /* Number of bits not shifted off the end. */
1395 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1396 else /* lcode == SIGN_EXTEND */
1397 /* Size of inner mode. */
1398 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1400 if (rcode == ASHIFTRT)
1401 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1402 else /* rcode == SIGN_EXTEND */
1403 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1405 	      /* We can only widen multiplies if the result is mathematically
1406 equivalent. I.e. if overflow was impossible. */
1407 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1408 return simplify_gen_binary
1409 (MULT, mode,
1410 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1411 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1415 /* Check for a sign extension of a subreg of a promoted
1416 variable, where the promotion is sign-extended, and the
1417 target mode is the same as the variable's promotion. */
1418 if (GET_CODE (op) == SUBREG
1419 && SUBREG_PROMOTED_VAR_P (op)
1420 && SUBREG_PROMOTED_SIGNED_P (op)
1421 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1423 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1424 if (temp)
1425 return temp;
1428 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1429 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1430 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1432 gcc_assert (GET_MODE_PRECISION (mode)
1433 > GET_MODE_PRECISION (GET_MODE (op)));
1434 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1438 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (sign_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits.
1441 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1442 is similarly (zero_extend:M (subreg:O <X>)). */
1443 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1444 && GET_CODE (XEXP (op, 0)) == ASHIFT
1445 && CONST_INT_P (XEXP (op, 1))
1446 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1447 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1449 machine_mode tmode
1450 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1451 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1452 gcc_assert (GET_MODE_BITSIZE (mode)
1453 > GET_MODE_BITSIZE (GET_MODE (op)));
1454 if (tmode != BLKmode)
1456 rtx inner =
1457 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1458 if (inner)
1459 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1460 ? SIGN_EXTEND : ZERO_EXTEND,
1461 mode, inner, tmode);
1465 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1466 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1467 if (GET_CODE (op) == LSHIFTRT
1468 && CONST_INT_P (XEXP (op, 1))
1469 && XEXP (op, 1) != const0_rtx)
1470 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1472 #if defined(POINTERS_EXTEND_UNSIGNED)
1473 /* As we do not know which address space the pointer is referring to,
1474 we can do this only if the target does not support different pointer
1475 or address modes depending on the address space. */
1476 if (target_default_pointer_address_modes_p ()
1477 && ! POINTERS_EXTEND_UNSIGNED
1478 && mode == Pmode && GET_MODE (op) == ptr_mode
1479 && (CONSTANT_P (op)
1480 || (GET_CODE (op) == SUBREG
1481 && REG_P (SUBREG_REG (op))
1482 && REG_POINTER (SUBREG_REG (op))
1483 && GET_MODE (SUBREG_REG (op)) == Pmode))
1484 && !targetm.have_ptr_extend ())
1485 return convert_memory_address (Pmode, op);
1486 #endif
1487 break;
1489 case ZERO_EXTEND:
1490 /* Check for a zero extension of a subreg of a promoted
1491 variable, where the promotion is zero-extended, and the
1492 target mode is the same as the variable's promotion. */
1493 if (GET_CODE (op) == SUBREG
1494 && SUBREG_PROMOTED_VAR_P (op)
1495 && SUBREG_PROMOTED_UNSIGNED_P (op)
1496 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1498 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1499 if (temp)
1500 return temp;
1503 /* Extending a widening multiplication should be canonicalized to
1504 a wider widening multiplication. */
1505 if (GET_CODE (op) == MULT)
1507 rtx lhs = XEXP (op, 0);
1508 rtx rhs = XEXP (op, 1);
1509 enum rtx_code lcode = GET_CODE (lhs);
1510 enum rtx_code rcode = GET_CODE (rhs);
1512 /* Widening multiplies usually extend both operands, but sometimes
1513 they use a shift to extract a portion of a register. */
1514 if ((lcode == ZERO_EXTEND
1515 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1516 && (rcode == ZERO_EXTEND
1517 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1519 machine_mode lmode = GET_MODE (lhs);
1520 machine_mode rmode = GET_MODE (rhs);
1521 int bits;
1523 if (lcode == LSHIFTRT)
1524 /* Number of bits not shifted off the end. */
1525 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1526 else /* lcode == ZERO_EXTEND */
1527 /* Size of inner mode. */
1528 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1530 if (rcode == LSHIFTRT)
1531 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1532 else /* rcode == ZERO_EXTEND */
1533 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1535 	      /* We can only widen multiplies if the result is mathematically
1536 equivalent. I.e. if overflow was impossible. */
1537 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1538 return simplify_gen_binary
1539 (MULT, mode,
1540 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1541 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1545 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1546 if (GET_CODE (op) == ZERO_EXTEND)
1547 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1548 GET_MODE (XEXP (op, 0)));
1550 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1551 is (zero_extend:M (subreg:O <X>)) if there is mode with
1552 GET_MODE_PRECISION (N) - I bits. */
1553 if (GET_CODE (op) == LSHIFTRT
1554 && GET_CODE (XEXP (op, 0)) == ASHIFT
1555 && CONST_INT_P (XEXP (op, 1))
1556 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1557 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1559 machine_mode tmode
1560 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1561 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1562 if (tmode != BLKmode)
1564 rtx inner =
1565 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1566 if (inner)
1567 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1571 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1572 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1573 of mode N. E.g.
1574 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1575 (and:SI (reg:SI) (const_int 63)). */
1576 if (GET_CODE (op) == SUBREG
1577 && GET_MODE_PRECISION (GET_MODE (op))
1578 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1579 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1580 <= HOST_BITS_PER_WIDE_INT
1581 && GET_MODE_PRECISION (mode)
1582 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1583 && subreg_lowpart_p (op)
1584 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1585 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1587 if (GET_MODE_PRECISION (mode)
1588 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1589 return SUBREG_REG (op);
1590 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1591 GET_MODE (SUBREG_REG (op)));
1594 #if defined(POINTERS_EXTEND_UNSIGNED)
1595 /* As we do not know which address space the pointer is referring to,
1596 we can do this only if the target does not support different pointer
1597 or address modes depending on the address space. */
1598 if (target_default_pointer_address_modes_p ()
1599 && POINTERS_EXTEND_UNSIGNED > 0
1600 && mode == Pmode && GET_MODE (op) == ptr_mode
1601 && (CONSTANT_P (op)
1602 || (GET_CODE (op) == SUBREG
1603 && REG_P (SUBREG_REG (op))
1604 && REG_POINTER (SUBREG_REG (op))
1605 && GET_MODE (SUBREG_REG (op)) == Pmode))
1606 && !targetm.have_ptr_extend ())
1607 return convert_memory_address (Pmode, op);
1608 #endif
1609 break;
1611 default:
1612 break;
1615 return 0;
1618 /* Try to compute the value of a unary operation CODE whose output mode is to
1619 be MODE with input operand OP whose mode was originally OP_MODE.
1620 Return zero if the value cannot be computed. */
1622 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1623 rtx op, machine_mode op_mode)
1625 unsigned int width = GET_MODE_PRECISION (mode);
1627 if (code == VEC_DUPLICATE)
1629 gcc_assert (VECTOR_MODE_P (mode));
1630 if (GET_MODE (op) != VOIDmode)
1632 if (!VECTOR_MODE_P (GET_MODE (op)))
1633 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1634 else
1635 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1636 (GET_MODE (op)));
1638 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1639 || GET_CODE (op) == CONST_VECTOR)
1641 int elt_size = GET_MODE_UNIT_SIZE (mode);
1642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1643 rtvec v = rtvec_alloc (n_elts);
1644 unsigned int i;
1646 if (GET_CODE (op) != CONST_VECTOR)
1647 for (i = 0; i < n_elts; i++)
1648 RTVEC_ELT (v, i) = op;
1649 else
1651 machine_mode inmode = GET_MODE (op);
1652 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1653 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1655 gcc_assert (in_n_elts < n_elts);
1656 gcc_assert ((n_elts % in_n_elts) == 0);
1657 for (i = 0; i < n_elts; i++)
1658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1660 return gen_rtx_CONST_VECTOR (mode, v);
1664 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1666 int elt_size = GET_MODE_UNIT_SIZE (mode);
1667 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1668 machine_mode opmode = GET_MODE (op);
1669 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1670 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1671 rtvec v = rtvec_alloc (n_elts);
1672 unsigned int i;
1674 gcc_assert (op_n_elts == n_elts);
1675 for (i = 0; i < n_elts; i++)
1677 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1678 CONST_VECTOR_ELT (op, i),
1679 GET_MODE_INNER (opmode));
1680 if (!x)
1681 return 0;
1682 RTVEC_ELT (v, i) = x;
1684 return gen_rtx_CONST_VECTOR (mode, v);
1687 /* The order of these tests is critical so that, for example, we don't
1688 check the wrong mode (input vs. output) for a conversion operation,
1689 such as FIX. At some point, this should be simplified. */
1691 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1693 REAL_VALUE_TYPE d;
1695 if (op_mode == VOIDmode)
1697 	/* CONST_INTs have VOIDmode as the mode.  We assume that all
1698 	   the bits of the constant are significant, though this is
1699 a dangerous assumption as many times CONST_INTs are
1700 created and used with garbage in the bits outside of the
1701 precision of the implied mode of the const_int. */
1702 op_mode = MAX_MODE_INT;
1705 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1707 /* Avoid the folding if flag_signaling_nans is on and
1708 operand is a signaling NaN. */
1709 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1710 return 0;
1712 d = real_value_truncate (mode, d);
1713 return const_double_from_real_value (d, mode);
1715 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1717 REAL_VALUE_TYPE d;
1719 if (op_mode == VOIDmode)
1721 	/* CONST_INTs have VOIDmode as the mode.  We assume that all
1722 	   the bits of the constant are significant, though this is
1723 a dangerous assumption as many times CONST_INTs are
1724 created and used with garbage in the bits outside of the
1725 precision of the implied mode of the const_int. */
1726 op_mode = MAX_MODE_INT;
1729 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1731 /* Avoid the folding if flag_signaling_nans is on and
1732 operand is a signaling NaN. */
1733 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1734 return 0;
1736 d = real_value_truncate (mode, d);
1737 return const_double_from_real_value (d, mode);
1740 if (CONST_SCALAR_INT_P (op) && width > 0)
1742 wide_int result;
1743 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1744 rtx_mode_t op0 = std::make_pair (op, imode);
1745 int int_value;
1747 #if TARGET_SUPPORTS_WIDE_INT == 0
1748 /* This assert keeps the simplification from producing a result
1749 that cannot be represented in a CONST_DOUBLE but a lot of
1750 upstream callers expect that this function never fails to
1751 	 simplify something, and so if you added this to the test
1752 	 above, the code would die later anyway.  If this assert
1753 happens, you just need to make the port support wide int. */
1754 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1755 #endif
1757 switch (code)
1759 case NOT:
1760 result = wi::bit_not (op0);
1761 break;
1763 case NEG:
1764 result = wi::neg (op0);
1765 break;
1767 case ABS:
1768 result = wi::abs (op0);
1769 break;
1771 case FFS:
1772 result = wi::shwi (wi::ffs (op0), mode);
1773 break;
1775 case CLZ:
1776 if (wi::ne_p (op0, 0))
1777 int_value = wi::clz (op0);
1778 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1779 int_value = GET_MODE_PRECISION (mode);
1780 result = wi::shwi (int_value, mode);
1781 break;
1783 case CLRSB:
1784 result = wi::shwi (wi::clrsb (op0), mode);
1785 break;
1787 case CTZ:
1788 if (wi::ne_p (op0, 0))
1789 int_value = wi::ctz (op0);
1790 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1791 int_value = GET_MODE_PRECISION (mode);
1792 result = wi::shwi (int_value, mode);
1793 break;
1795 case POPCOUNT:
1796 result = wi::shwi (wi::popcount (op0), mode);
1797 break;
1799 case PARITY:
1800 result = wi::shwi (wi::parity (op0), mode);
1801 break;
1803 case BSWAP:
1804 result = wide_int (op0).bswap ();
1805 break;
1807 case TRUNCATE:
1808 case ZERO_EXTEND:
1809 result = wide_int::from (op0, width, UNSIGNED);
1810 break;
1812 case SIGN_EXTEND:
1813 result = wide_int::from (op0, width, SIGNED);
1814 break;
1816 case SQRT:
1817 default:
1818 return 0;
1821 return immed_wide_int_const (result, mode);
1824 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1825 && SCALAR_FLOAT_MODE_P (mode)
1826 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1828 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1829 switch (code)
1831 case SQRT:
1832 return 0;
1833 case ABS:
1834 d = real_value_abs (&d);
1835 break;
1836 case NEG:
1837 d = real_value_negate (&d);
1838 break;
1839 case FLOAT_TRUNCATE:
1840 /* Don't perform the operation if flag_signaling_nans is on
1841 and the operand is a signaling NaN. */
1842 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1843 d = real_value_truncate (mode, d);
1844 break;
1845 case FLOAT_EXTEND:
1846 /* All this does is change the mode, unless the
1847 mode class changes. */
1848 /* Don't perform the operation if flag_signaling_nans is on
1849 and the operand is a signaling NaN. */
1850 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1851 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1852 real_convert (&d, mode, &d);
1853 break;
1854 case FIX:
1855 /* Don't perform the operation if flag_signaling_nans is on
1856 and the operand is a signaling NaN. */
1857 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1858 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1859 break;
1860 case NOT:
1862 long tmp[4];
1863 int i;
1865 real_to_target (tmp, &d, GET_MODE (op));
1866 for (i = 0; i < 4; i++)
1867 tmp[i] = ~tmp[i];
1868 real_from_target (&d, tmp, mode);
1869 break;
1871 default:
1872 gcc_unreachable ();
1874 return const_double_from_real_value (d, mode);
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1878 && GET_MODE_CLASS (mode) == MODE_INT
1879 && width > 0)
1881 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1882 operators are intentionally left unspecified (to ease implementation
1883 by target backends), for consistency, this routine implements the
1884 same semantics for constant folding as used by the middle-end. */
1886 /* This was formerly used only for non-IEEE float.
1887 eggert@twinsun.com says it is safe for IEEE also. */
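/* For example, on a target where SImode is 32 bits wide, folding
   (fix:SI (const_double 1.0e10)) saturates to (const_int 2147483647),
   and a NaN operand folds to const0_rtx. */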
1888 REAL_VALUE_TYPE t;
1889 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1890 wide_int wmax, wmin;
1891 /* This is part of the ABI of real_to_integer, but we check
1892 things before making this call. */
1893 bool fail;
1895 switch (code)
1897 case FIX:
1898 if (REAL_VALUE_ISNAN (*x))
1899 return const0_rtx;
1901 /* Test against the signed upper bound. */
1902 wmax = wi::max_value (width, SIGNED);
1903 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1904 if (real_less (&t, x))
1905 return immed_wide_int_const (wmax, mode);
1907 /* Test against the signed lower bound. */
1908 wmin = wi::min_value (width, SIGNED);
1909 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1910 if (real_less (x, &t))
1911 return immed_wide_int_const (wmin, mode);
1913 return immed_wide_int_const (real_to_integer (x, &fail, width),
1914 mode);
1916 case UNSIGNED_FIX:
1917 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1918 return const0_rtx;
1920 /* Test against the unsigned upper bound. */
1921 wmax = wi::max_value (width, UNSIGNED);
1922 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1923 if (real_less (&t, x))
1924 return immed_wide_int_const (wmax, mode);
1926 return immed_wide_int_const (real_to_integer (x, &fail, width),
1927 mode);
1929 default:
1930 gcc_unreachable ();
1934 return NULL_RTX;
1937 /* Subroutine of simplify_binary_operation to simplify a binary operation
1938 CODE that can commute with byte swapping, with result mode MODE and
1939 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1940 Return zero if no simplification or canonicalization is possible. */
1942 static rtx
1943 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1946 rtx tem;
1948 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1949 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1951 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1952 simplify_gen_unary (BSWAP, mode, op1, mode));
1953 return simplify_gen_unary (BSWAP, mode, tem, mode);
1956 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1957 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1959 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1960 return simplify_gen_unary (BSWAP, mode, tem, mode);
1963 return NULL_RTX;
1966 /* Subroutine of simplify_binary_operation to simplify a commutative,
1967 associative binary operation CODE with result mode MODE, operating
1968 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1969 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1970 canonicalization is possible. */
1972 static rtx
1973 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1974 rtx op0, rtx op1)
1976 rtx tem;
1978 /* Linearize the operator to the left. */
1979 if (GET_CODE (op1) == code)
1981 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1982 if (GET_CODE (op0) == code)
1984 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1985 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1988 /* "a op (b op c)" becomes "(b op c) op a". */
1989 if (! swap_commutative_operands_p (op1, op0))
1990 return simplify_gen_binary (code, mode, op1, op0);
1992 std::swap (op0, op1);
1995 if (GET_CODE (op0) == code)
1997 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1998 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2000 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2001 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2004 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2005 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2006 if (tem != 0)
2007 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2009 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2010 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2011 if (tem != 0)
2012 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2015 return 0;
2019 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2020 and OP1. Return 0 if no simplification is possible.
2022 Don't use this for relational operations such as EQ or LT.
2023 Use simplify_relational_operation instead. */
2025 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2026 rtx op0, rtx op1)
2028 rtx trueop0, trueop1;
2029 rtx tem;
2031 /* Relational operations don't work here. We must know the mode
2032 of the operands in order to do the comparison correctly.
2033 Assuming a full word can give incorrect results.
2034 Consider comparing 128 with -128 in QImode. */
2035 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2036 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2038 /* Make sure the constant is second. */
2039 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2040 && swap_commutative_operands_p (op0, op1))
2041 std::swap (op0, op1);
2043 trueop0 = avoid_constant_pool_reference (op0);
2044 trueop1 = avoid_constant_pool_reference (op1);
2046 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2047 if (tem)
2048 return tem;
2049 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2051 if (tem)
2052 return tem;
2054 /* If the above steps did not result in a simplification and op0 or op1
2055 were constant pool references, use the referenced constants directly. */
2056 if (trueop0 != op0 || trueop1 != op1)
2057 return simplify_gen_binary (code, mode, trueop0, trueop1);
2059 return NULL_RTX;
2062 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2063 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2064 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2065 actual constants. */
2067 static rtx
2068 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2069 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2071 rtx tem, reversed, opleft, opright;
2072 HOST_WIDE_INT val;
2073 unsigned int width = GET_MODE_PRECISION (mode);
2075 /* Even if we can't compute a constant result,
2076 there are some cases worth simplifying. */
2078 switch (code)
2080 case PLUS:
2081 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2082 when x is NaN, infinite, or finite and nonzero. They aren't
2083 when x is -0 and the rounding mode is not towards -infinity,
2084 since (-0) + 0 is then 0. */
2085 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2086 return op0;
2088 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2089 transformations are safe even for IEEE. */
2090 if (GET_CODE (op0) == NEG)
2091 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2092 else if (GET_CODE (op1) == NEG)
2093 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2095 /* (~a) + 1 -> -a */
2096 if (INTEGRAL_MODE_P (mode)
2097 && GET_CODE (op0) == NOT
2098 && trueop1 == const1_rtx)
2099 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2101 /* Handle both-operands-constant cases. We can only add
2102 CONST_INTs to constants since the sum of relocatable symbols
2103 can't be handled by most assemblers. Don't add CONST_INT
2104 to CONST_INT since overflow won't be computed properly if wider
2105 than HOST_BITS_PER_WIDE_INT. */
2107 if ((GET_CODE (op0) == CONST
2108 || GET_CODE (op0) == SYMBOL_REF
2109 || GET_CODE (op0) == LABEL_REF)
2110 && CONST_INT_P (op1))
2111 return plus_constant (mode, op0, INTVAL (op1));
2112 else if ((GET_CODE (op1) == CONST
2113 || GET_CODE (op1) == SYMBOL_REF
2114 || GET_CODE (op1) == LABEL_REF)
2115 && CONST_INT_P (op0))
2116 return plus_constant (mode, op1, INTVAL (op0));
2118 /* See if this is something like X * C - X or vice versa or
2119 if the multiplication is written as a shift. If so, we can
2120 distribute and make a new multiply, shift, or maybe just
2121 have X (if C is 2 in the example above). But don't make
2122 something more expensive than we had before. */
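/* For example, (plus (mult X 3) X) can become (mult X 4), and
   (plus (ashift X 2) X) can become (mult X 5), provided the new
   form is not costlier than the original. */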
2124 if (SCALAR_INT_MODE_P (mode))
2126 rtx lhs = op0, rhs = op1;
2128 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2129 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2131 if (GET_CODE (lhs) == NEG)
2133 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2134 lhs = XEXP (lhs, 0);
2136 else if (GET_CODE (lhs) == MULT
2137 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2139 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2140 lhs = XEXP (lhs, 0);
2142 else if (GET_CODE (lhs) == ASHIFT
2143 && CONST_INT_P (XEXP (lhs, 1))
2144 && INTVAL (XEXP (lhs, 1)) >= 0
2145 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2147 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2148 GET_MODE_PRECISION (mode));
2149 lhs = XEXP (lhs, 0);
2152 if (GET_CODE (rhs) == NEG)
2154 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2155 rhs = XEXP (rhs, 0);
2157 else if (GET_CODE (rhs) == MULT
2158 && CONST_INT_P (XEXP (rhs, 1)))
2160 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2161 rhs = XEXP (rhs, 0);
2163 else if (GET_CODE (rhs) == ASHIFT
2164 && CONST_INT_P (XEXP (rhs, 1))
2165 && INTVAL (XEXP (rhs, 1)) >= 0
2166 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2168 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2169 GET_MODE_PRECISION (mode));
2170 rhs = XEXP (rhs, 0);
2173 if (rtx_equal_p (lhs, rhs))
2175 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2176 rtx coeff;
2177 bool speed = optimize_function_for_speed_p (cfun);
2179 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2181 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2182 return (set_src_cost (tem, mode, speed)
2183 <= set_src_cost (orig, mode, speed) ? tem : 0);
2187 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
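/* E.g. in QImode, (plus (xor X 0x90) 0x80) becomes (xor X 0x10),
   since adding the sign bit is the same as XORing it in. */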
2188 if (CONST_SCALAR_INT_P (op1)
2189 && GET_CODE (op0) == XOR
2190 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2191 && mode_signbit_p (mode, op1))
2192 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2193 simplify_gen_binary (XOR, mode, op1,
2194 XEXP (op0, 1)));
2196 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2197 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2198 && GET_CODE (op0) == MULT
2199 && GET_CODE (XEXP (op0, 0)) == NEG)
2201 rtx in1, in2;
2203 in1 = XEXP (XEXP (op0, 0), 0);
2204 in2 = XEXP (op0, 1);
2205 return simplify_gen_binary (MINUS, mode, op1,
2206 simplify_gen_binary (MULT, mode,
2207 in1, in2));
2210 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2211 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2212 is 1. */
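/* For example, with STORE_FLAG_VALUE == 1,
   (plus (eq A B) (const_int -1)) becomes (neg (ne A B)). */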
2213 if (COMPARISON_P (op0)
2214 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2215 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2216 && (reversed = reversed_comparison (op0, mode)))
2217 return
2218 simplify_gen_unary (NEG, mode, reversed, mode);
2220 /* If one of the operands is a PLUS or a MINUS, see if we can
2221 simplify this by the associative law.
2222 Don't use the associative law for floating point.
2223 The inaccuracy makes it nonassociative,
2224 and subtle programs can break if operations are associated. */
2226 if (INTEGRAL_MODE_P (mode)
2227 && (plus_minus_operand_p (op0)
2228 || plus_minus_operand_p (op1))
2229 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2230 return tem;
2232 /* Reassociate floating point addition only when the user
2233 specifies associative math operations. */
2234 if (FLOAT_MODE_P (mode)
2235 && flag_associative_math)
2237 tem = simplify_associative_operation (code, mode, op0, op1);
2238 if (tem)
2239 return tem;
2241 break;
2243 case COMPARE:
2244 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2245 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2246 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2247 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2249 rtx xop00 = XEXP (op0, 0);
2250 rtx xop10 = XEXP (op1, 0);
2252 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2253 return xop00;
2255 if (REG_P (xop00) && REG_P (xop10)
2256 && GET_MODE (xop00) == GET_MODE (xop10)
2257 && REGNO (xop00) == REGNO (xop10)
2258 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2259 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2260 return xop00;
2262 break;
2264 case MINUS:
2265 /* We can't assume x-x is 0 even with non-IEEE floating point,
2266 but since it is zero except in very strange circumstances, we
2267 will treat it as zero with -ffinite-math-only. */
2268 if (rtx_equal_p (trueop0, trueop1)
2269 && ! side_effects_p (op0)
2270 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2271 return CONST0_RTX (mode);
2273 /* Change subtraction from zero into negation. (0 - x) is the
2274 same as -x when x is NaN, infinite, or finite and nonzero.
2275 But if the mode has signed zeros, and does not round towards
2276 -infinity, then 0 - 0 is 0, not -0. */
2277 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2278 return simplify_gen_unary (NEG, mode, op1, mode);
2280 /* (-1 - a) is ~a, unless the expression contains symbolic
2281 constants, in which case not retaining additions and
2282 subtractions could cause invalid assembly to be produced. */
2283 if (trueop0 == constm1_rtx
2284 && !contains_symbolic_reference_p (op1))
2285 return simplify_gen_unary (NOT, mode, op1, mode);
2287 /* Subtracting 0 has no effect unless the mode has signed zeros
2288 and supports rounding towards -infinity. In such a case,
2289 0 - 0 is -0. */
2290 if (!(HONOR_SIGNED_ZEROS (mode)
2291 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2292 && trueop1 == CONST0_RTX (mode))
2293 return op0;
2295 /* See if this is something like X * C - X or vice versa or
2296 if the multiplication is written as a shift. If so, we can
2297 distribute and make a new multiply, shift, or maybe just
2298 have X (if C is 2 in the example above). But don't make
2299 something more expensive than we had before. */
2301 if (SCALAR_INT_MODE_P (mode))
2303 rtx lhs = op0, rhs = op1;
2305 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2306 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2308 if (GET_CODE (lhs) == NEG)
2310 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2311 lhs = XEXP (lhs, 0);
2313 else if (GET_CODE (lhs) == MULT
2314 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2316 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2317 lhs = XEXP (lhs, 0);
2319 else if (GET_CODE (lhs) == ASHIFT
2320 && CONST_INT_P (XEXP (lhs, 1))
2321 && INTVAL (XEXP (lhs, 1)) >= 0
2322 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2324 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2325 GET_MODE_PRECISION (mode));
2326 lhs = XEXP (lhs, 0);
2329 if (GET_CODE (rhs) == NEG)
2331 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2332 rhs = XEXP (rhs, 0);
2334 else if (GET_CODE (rhs) == MULT
2335 && CONST_INT_P (XEXP (rhs, 1)))
2337 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2338 rhs = XEXP (rhs, 0);
2340 else if (GET_CODE (rhs) == ASHIFT
2341 && CONST_INT_P (XEXP (rhs, 1))
2342 && INTVAL (XEXP (rhs, 1)) >= 0
2343 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2345 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2346 GET_MODE_PRECISION (mode));
2347 negcoeff1 = -negcoeff1;
2348 rhs = XEXP (rhs, 0);
2351 if (rtx_equal_p (lhs, rhs))
2353 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2354 rtx coeff;
2355 bool speed = optimize_function_for_speed_p (cfun);
2357 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2359 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2360 return (set_src_cost (tem, mode, speed)
2361 <= set_src_cost (orig, mode, speed) ? tem : 0);
2365 /* (a - (-b)) -> (a + b). True even for IEEE. */
2366 if (GET_CODE (op1) == NEG)
2367 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2369 /* (-x - c) may be simplified as (-c - x). */
2370 if (GET_CODE (op0) == NEG
2371 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2373 tem = simplify_unary_operation (NEG, mode, op1, mode);
2374 if (tem)
2375 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2378 /* Don't let a relocatable value get a negative coeff. */
2379 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2380 return simplify_gen_binary (PLUS, mode,
2381 op0,
2382 neg_const_int (mode, op1));
2384 /* (x - (x & y)) -> (x & ~y) */
2385 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2387 if (rtx_equal_p (op0, XEXP (op1, 0)))
2389 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2390 GET_MODE (XEXP (op1, 1)));
2391 return simplify_gen_binary (AND, mode, op0, tem);
2393 if (rtx_equal_p (op0, XEXP (op1, 1)))
2395 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2396 GET_MODE (XEXP (op1, 0)));
2397 return simplify_gen_binary (AND, mode, op0, tem);
2401 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2402 by reversing the comparison code if valid. */
2403 if (STORE_FLAG_VALUE == 1
2404 && trueop0 == const1_rtx
2405 && COMPARISON_P (op1)
2406 && (reversed = reversed_comparison (op1, mode)))
2407 return reversed;
2409 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2410 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2411 && GET_CODE (op1) == MULT
2412 && GET_CODE (XEXP (op1, 0)) == NEG)
2414 rtx in1, in2;
2416 in1 = XEXP (XEXP (op1, 0), 0);
2417 in2 = XEXP (op1, 1);
2418 return simplify_gen_binary (PLUS, mode,
2419 simplify_gen_binary (MULT, mode,
2420 in1, in2),
2421 op0);
2424 /* Canonicalize (minus (neg A) (mult B C)) to
2425 (minus (mult (neg B) C) A). */
2426 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2427 && GET_CODE (op1) == MULT
2428 && GET_CODE (op0) == NEG)
2430 rtx in1, in2;
2432 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2433 in2 = XEXP (op1, 1);
2434 return simplify_gen_binary (MINUS, mode,
2435 simplify_gen_binary (MULT, mode,
2436 in1, in2),
2437 XEXP (op0, 0));
2440 /* If one of the operands is a PLUS or a MINUS, see if we can
2441 simplify this by the associative law. This will, for example,
2442 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2443 Don't use the associative law for floating point.
2444 The inaccuracy makes it nonassociative,
2445 and subtle programs can break if operations are associated. */
2447 if (INTEGRAL_MODE_P (mode)
2448 && (plus_minus_operand_p (op0)
2449 || plus_minus_operand_p (op1))
2450 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2451 return tem;
2452 break;
2454 case MULT:
2455 if (trueop1 == constm1_rtx)
2456 return simplify_gen_unary (NEG, mode, op0, mode);
2458 if (GET_CODE (op0) == NEG)
2460 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2461 /* If op1 is a MULT as well and simplify_unary_operation
2462 just moved the NEG to the second operand, simplify_gen_binary
2463 below could, through simplify_associative_operation, move
2464 the NEG around again and recurse endlessly. */
2465 if (temp
2466 && GET_CODE (op1) == MULT
2467 && GET_CODE (temp) == MULT
2468 && XEXP (op1, 0) == XEXP (temp, 0)
2469 && GET_CODE (XEXP (temp, 1)) == NEG
2470 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2471 temp = NULL_RTX;
2472 if (temp)
2473 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2475 if (GET_CODE (op1) == NEG)
2477 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2478 /* If op0 is a MULT as well and simplify_unary_operation
2479 just moved the NEG to the second operand, simplify_gen_binary
2480 below could, through simplify_associative_operation, move
2481 the NEG around again and recurse endlessly. */
2482 if (temp
2483 && GET_CODE (op0) == MULT
2484 && GET_CODE (temp) == MULT
2485 && XEXP (op0, 0) == XEXP (temp, 0)
2486 && GET_CODE (XEXP (temp, 1)) == NEG
2487 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2488 temp = NULL_RTX;
2489 if (temp)
2490 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2493 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2494 x is NaN, since x * 0 is then also NaN. Nor is it valid
2495 when the mode has signed zeros, since multiplying a negative
2496 number by 0 will give -0, not 0. */
2497 if (!HONOR_NANS (mode)
2498 && !HONOR_SIGNED_ZEROS (mode)
2499 && trueop1 == CONST0_RTX (mode)
2500 && ! side_effects_p (op0))
2501 return op1;
2503 /* In IEEE floating point, x*1 is not equivalent to x for
2504 signalling NaNs. */
2505 if (!HONOR_SNANS (mode)
2506 && trueop1 == CONST1_RTX (mode))
2507 return op0;
2509 /* Convert multiply by constant power of two into shift. */
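/* E.g. (mult X 8) becomes (ashift X 3). */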
2510 if (CONST_SCALAR_INT_P (trueop1))
2512 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2513 if (val >= 0)
2514 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2517 /* x*2 is x+x and x*(-1) is -x */
2518 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2519 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2520 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2521 && GET_MODE (op0) == mode)
2523 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2525 if (real_equal (d1, &dconst2))
2526 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2528 if (!HONOR_SNANS (mode)
2529 && real_equal (d1, &dconstm1))
2530 return simplify_gen_unary (NEG, mode, op0, mode);
2533 /* Optimize -x * -x as x * x. */
2534 if (FLOAT_MODE_P (mode)
2535 && GET_CODE (op0) == NEG
2536 && GET_CODE (op1) == NEG
2537 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2538 && !side_effects_p (XEXP (op0, 0)))
2539 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2541 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2542 if (SCALAR_FLOAT_MODE_P (mode)
2543 && GET_CODE (op0) == ABS
2544 && GET_CODE (op1) == ABS
2545 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2546 && !side_effects_p (XEXP (op0, 0)))
2547 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2549 /* Reassociate multiplication, but for floating point MULTs
2550 only when the user specifies unsafe math optimizations. */
2551 if (! FLOAT_MODE_P (mode)
2552 || flag_unsafe_math_optimizations)
2554 tem = simplify_associative_operation (code, mode, op0, op1);
2555 if (tem)
2556 return tem;
2558 break;
2560 case IOR:
2561 if (trueop1 == CONST0_RTX (mode))
2562 return op0;
2563 if (INTEGRAL_MODE_P (mode)
2564 && trueop1 == CONSTM1_RTX (mode)
2565 && !side_effects_p (op0))
2566 return op1;
2567 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2568 return op0;
2569 /* A | (~A) -> -1 */
2570 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2571 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2572 && ! side_effects_p (op0)
2573 && SCALAR_INT_MODE_P (mode))
2574 return constm1_rtx;
2576 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2577 if (CONST_INT_P (op1)
2578 && HWI_COMPUTABLE_MODE_P (mode)
2579 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2580 && !side_effects_p (op0))
2581 return op1;
2583 /* Canonicalize (X & C1) | C2. */
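/* E.g. in QImode, (ior (and X 0x3c) 0x0f) becomes
   (ior (and X 0x30) 0x0f), since 0x3c & ~0x0f == 0x30. */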
2584 if (GET_CODE (op0) == AND
2585 && CONST_INT_P (trueop1)
2586 && CONST_INT_P (XEXP (op0, 1)))
2588 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2589 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2590 HOST_WIDE_INT c2 = INTVAL (trueop1);
2592 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2593 if ((c1 & c2) == c1
2594 && !side_effects_p (XEXP (op0, 0)))
2595 return trueop1;
2597 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2598 if (((c1|c2) & mask) == mask)
2599 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2601 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2602 if (((c1 & ~c2) & mask) != (c1 & mask))
2604 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2605 gen_int_mode (c1 & ~c2, mode));
2606 return simplify_gen_binary (IOR, mode, tem, op1);
2610 /* Convert (A & B) | A to A. */
2611 if (GET_CODE (op0) == AND
2612 && (rtx_equal_p (XEXP (op0, 0), op1)
2613 || rtx_equal_p (XEXP (op0, 1), op1))
2614 && ! side_effects_p (XEXP (op0, 0))
2615 && ! side_effects_p (XEXP (op0, 1)))
2616 return op1;
2618 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2619 mode size to (rotate A CX). */
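/* For example, in SImode (ior (ashift A 8) (lshiftrt A 24))
   becomes (rotate A 8), since 8 + 24 == 32. */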
2621 if (GET_CODE (op1) == ASHIFT
2622 || GET_CODE (op1) == SUBREG)
2624 opleft = op1;
2625 opright = op0;
2627 else
2629 opright = op1;
2630 opleft = op0;
2633 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2634 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2635 && CONST_INT_P (XEXP (opleft, 1))
2636 && CONST_INT_P (XEXP (opright, 1))
2637 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2638 == GET_MODE_PRECISION (mode)))
2639 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2641 /* Same, but for ashift that has been "simplified" to a wider mode
2642 by simplify_shift_const. */
2644 if (GET_CODE (opleft) == SUBREG
2645 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2646 && GET_CODE (opright) == LSHIFTRT
2647 && GET_CODE (XEXP (opright, 0)) == SUBREG
2648 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2649 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2650 && (GET_MODE_SIZE (GET_MODE (opleft))
2651 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2652 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2653 SUBREG_REG (XEXP (opright, 0)))
2654 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2655 && CONST_INT_P (XEXP (opright, 1))
2656 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2657 == GET_MODE_PRECISION (mode)))
2658 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2659 XEXP (SUBREG_REG (opleft), 1));
2661 /* If we have (ior (and X C1) C2), simplify this by making
2662 C1 as small as possible if C1 actually changes. */
2663 if (CONST_INT_P (op1)
2664 && (HWI_COMPUTABLE_MODE_P (mode)
2665 || INTVAL (op1) > 0)
2666 && GET_CODE (op0) == AND
2667 && CONST_INT_P (XEXP (op0, 1))
2668 && CONST_INT_P (op1)
2669 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2671 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2672 gen_int_mode (UINTVAL (XEXP (op0, 1))
2673 & ~UINTVAL (op1),
2674 mode));
2675 return simplify_gen_binary (IOR, mode, tmp, op1);
2678 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2679 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2680 the PLUS does not affect any of the bits in OP1, we can do
2681 the IOR as a PLUS and we can associate. This is valid if OP1
2682 can be safely shifted left C bits. */
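/* For example, with C == 8 and OP1 == 3,
   (ior (ashiftrt (plus A C1) 8) 3) can become
   (ashiftrt (plus A (C1 + 0x300)) 8) when the 0x300 bits are
   known to be zero in the PLUS. */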
2683 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2684 && GET_CODE (XEXP (op0, 0)) == PLUS
2685 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2686 && CONST_INT_P (XEXP (op0, 1))
2687 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2689 int count = INTVAL (XEXP (op0, 1));
2690 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2692 if (mask >> count == INTVAL (trueop1)
2693 && trunc_int_for_mode (mask, mode) == mask
2694 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2695 return simplify_gen_binary (ASHIFTRT, mode,
2696 plus_constant (mode, XEXP (op0, 0),
2697 mask),
2698 XEXP (op0, 1));
2701 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2702 if (tem)
2703 return tem;
2705 tem = simplify_associative_operation (code, mode, op0, op1);
2706 if (tem)
2707 return tem;
2708 break;
2710 case XOR:
2711 if (trueop1 == CONST0_RTX (mode))
2712 return op0;
2713 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2714 return simplify_gen_unary (NOT, mode, op0, mode);
2715 if (rtx_equal_p (trueop0, trueop1)
2716 && ! side_effects_p (op0)
2717 && GET_MODE_CLASS (mode) != MODE_CC)
2718 return CONST0_RTX (mode);
2720 /* Canonicalize XOR of the most significant bit to PLUS. */
2721 if (CONST_SCALAR_INT_P (op1)
2722 && mode_signbit_p (mode, op1))
2723 return simplify_gen_binary (PLUS, mode, op0, op1);
2724 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2725 if (CONST_SCALAR_INT_P (op1)
2726 && GET_CODE (op0) == PLUS
2727 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2728 && mode_signbit_p (mode, XEXP (op0, 1)))
2729 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2730 simplify_gen_binary (XOR, mode, op1,
2731 XEXP (op0, 1)));
2733 /* If we are XORing two things that have no bits in common,
2734 convert them into an IOR. This helps to detect rotation encoded
2735 using those methods and possibly other simplifications. */
2737 if (HWI_COMPUTABLE_MODE_P (mode)
2738 && (nonzero_bits (op0, mode)
2739 & nonzero_bits (op1, mode)) == 0)
2740 return (simplify_gen_binary (IOR, mode, op0, op1));
2742 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2743 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2744 (NOT y). */
2746 int num_negated = 0;
2748 if (GET_CODE (op0) == NOT)
2749 num_negated++, op0 = XEXP (op0, 0);
2750 if (GET_CODE (op1) == NOT)
2751 num_negated++, op1 = XEXP (op1, 0);
2753 if (num_negated == 2)
2754 return simplify_gen_binary (XOR, mode, op0, op1);
2755 else if (num_negated == 1)
2756 return simplify_gen_unary (NOT, mode,
2757 simplify_gen_binary (XOR, mode, op0, op1),
2758 mode);
2761 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2762 correspond to a machine insn or result in further simplifications
2763 if B is a constant. */
2765 if (GET_CODE (op0) == AND
2766 && rtx_equal_p (XEXP (op0, 1), op1)
2767 && ! side_effects_p (op1))
2768 return simplify_gen_binary (AND, mode,
2769 simplify_gen_unary (NOT, mode,
2770 XEXP (op0, 0), mode),
2771 op1);
2773 else if (GET_CODE (op0) == AND
2774 && rtx_equal_p (XEXP (op0, 0), op1)
2775 && ! side_effects_p (op1))
2776 return simplify_gen_binary (AND, mode,
2777 simplify_gen_unary (NOT, mode,
2778 XEXP (op0, 1), mode),
2779 op1);
2781 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2782 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2783 out bits inverted twice and not set by C. Similarly, given
2784 (xor (and (xor A B) C) D), simplify without inverting C in
2785 the xor operand: (xor (and A C) (B&C)^D). */
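/* E.g. in QImode with op == IOR, B == 0x0f, C == 0x33 and D == 0x55,
   the result is (xor (ior A 0x33) 0x59),
   since (0x0f & ~0x33) ^ 0x55 == 0x59. */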
2787 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2788 && GET_CODE (XEXP (op0, 0)) == XOR
2789 && CONST_INT_P (op1)
2790 && CONST_INT_P (XEXP (op0, 1))
2791 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2793 enum rtx_code op = GET_CODE (op0);
2794 rtx a = XEXP (XEXP (op0, 0), 0);
2795 rtx b = XEXP (XEXP (op0, 0), 1);
2796 rtx c = XEXP (op0, 1);
2797 rtx d = op1;
2798 HOST_WIDE_INT bval = INTVAL (b);
2799 HOST_WIDE_INT cval = INTVAL (c);
2800 HOST_WIDE_INT dval = INTVAL (d);
2801 HOST_WIDE_INT xcval;
2803 if (op == IOR)
2804 xcval = ~cval;
2805 else
2806 xcval = cval;
2808 return simplify_gen_binary (XOR, mode,
2809 simplify_gen_binary (op, mode, a, c),
2810 gen_int_mode ((bval & xcval) ^ dval,
2811 mode));
2814 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2815 we can transform like this:
2816 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2817 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2818 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2819 Attempt a few simplifications when B and C are both constants. */
2820 if (GET_CODE (op0) == AND
2821 && CONST_INT_P (op1)
2822 && CONST_INT_P (XEXP (op0, 1)))
2824 rtx a = XEXP (op0, 0);
2825 rtx b = XEXP (op0, 1);
2826 rtx c = op1;
2827 HOST_WIDE_INT bval = INTVAL (b);
2828 HOST_WIDE_INT cval = INTVAL (c);
2830 /* Instead of computing ~A&C, we compute its negated value,
2831 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2832 optimize for sure. If it does not simplify, we still try
2833 to compute ~A&C below, but since that always allocates
2834 RTL, we don't try that before committing to returning a
2835 simplified expression. */
2836 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2837 GEN_INT (~cval));
2839 if ((~cval & bval) == 0)
2841 rtx na_c = NULL_RTX;
2842 if (n_na_c)
2843 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2844 else
2846 /* If ~A does not simplify, don't bother: we don't
2847 want to simplify 2 operations into 3, and if na_c
2848 were to simplify with na, n_na_c would have
2849 simplified as well. */
2850 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2851 if (na)
2852 na_c = simplify_gen_binary (AND, mode, na, c);
2855 /* Try to simplify ~A&C | ~B&C. */
2856 if (na_c != NULL_RTX)
2857 return simplify_gen_binary (IOR, mode, na_c,
2858 gen_int_mode (~bval & cval, mode));
2860 else
2862 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2863 if (n_na_c == CONSTM1_RTX (mode))
2865 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2866 gen_int_mode (~cval & bval,
2867 mode));
2868 return simplify_gen_binary (IOR, mode, a_nc_b,
2869 gen_int_mode (~bval & cval,
2870 mode));
2875 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2876 comparison if STORE_FLAG_VALUE is 1. */
2877 if (STORE_FLAG_VALUE == 1
2878 && trueop1 == const1_rtx
2879 && COMPARISON_P (op0)
2880 && (reversed = reversed_comparison (op0, mode)))
2881 return reversed;
2883 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2884 is (lt foo (const_int 0)), so we can perform the above
2885 simplification if STORE_FLAG_VALUE is 1. */
2887 if (STORE_FLAG_VALUE == 1
2888 && trueop1 == const1_rtx
2889 && GET_CODE (op0) == LSHIFTRT
2890 && CONST_INT_P (XEXP (op0, 1))
2891 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2892 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2894 /* (xor (comparison foo bar) (const_int sign-bit))
2895 when STORE_FLAG_VALUE is the sign bit. */
2896 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2897 && trueop1 == const_true_rtx
2898 && COMPARISON_P (op0)
2899 && (reversed = reversed_comparison (op0, mode)))
2900 return reversed;
2902 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2903 if (tem)
2904 return tem;
2906 tem = simplify_associative_operation (code, mode, op0, op1);
2907 if (tem)
2908 return tem;
2909 break;
2911 case AND:
2912 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2913 return trueop1;
2914 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2915 return op0;
2916 if (HWI_COMPUTABLE_MODE_P (mode))
2918 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2919 HOST_WIDE_INT nzop1;
2920 if (CONST_INT_P (trueop1))
2922 HOST_WIDE_INT val1 = INTVAL (trueop1);
2923 /* If we are turning off bits already known off in OP0, we need
2924 not do an AND. */
2925 if ((nzop0 & ~val1) == 0)
2926 return op0;
2928 nzop1 = nonzero_bits (trueop1, mode);
2929 /* If we are clearing all the nonzero bits, the result is zero. */
2930 if ((nzop1 & nzop0) == 0
2931 && !side_effects_p (op0) && !side_effects_p (op1))
2932 return CONST0_RTX (mode);
2934 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2935 && GET_MODE_CLASS (mode) != MODE_CC)
2936 return op0;
2937 /* A & (~A) -> 0 */
2938 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2939 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2940 && ! side_effects_p (op0)
2941 && GET_MODE_CLASS (mode) != MODE_CC)
2942 return CONST0_RTX (mode);
2944 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2945 there are no nonzero bits of C outside of X's mode. */
2946 if ((GET_CODE (op0) == SIGN_EXTEND
2947 || GET_CODE (op0) == ZERO_EXTEND)
2948 && CONST_INT_P (trueop1)
2949 && HWI_COMPUTABLE_MODE_P (mode)
2950 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2951 & UINTVAL (trueop1)) == 0)
2953 machine_mode imode = GET_MODE (XEXP (op0, 0));
2954 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2955 gen_int_mode (INTVAL (trueop1),
2956 imode));
2957 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2960 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2961 we might be able to further simplify the AND with X and potentially
2962 remove the truncation altogether. */
2963 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2965 rtx x = XEXP (op0, 0);
2966 machine_mode xmode = GET_MODE (x);
2967 tem = simplify_gen_binary (AND, xmode, x,
2968 gen_int_mode (INTVAL (trueop1), xmode));
2969 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2972 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2973 if (GET_CODE (op0) == IOR
2974 && CONST_INT_P (trueop1)
2975 && CONST_INT_P (XEXP (op0, 1)))
2977 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2978 return simplify_gen_binary (IOR, mode,
2979 simplify_gen_binary (AND, mode,
2980 XEXP (op0, 0), op1),
2981 gen_int_mode (tmp, mode));
2984 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2985 insn (and may simplify more). */
2986 if (GET_CODE (op0) == XOR
2987 && rtx_equal_p (XEXP (op0, 0), op1)
2988 && ! side_effects_p (op1))
2989 return simplify_gen_binary (AND, mode,
2990 simplify_gen_unary (NOT, mode,
2991 XEXP (op0, 1), mode),
2992 op1);
2994 if (GET_CODE (op0) == XOR
2995 && rtx_equal_p (XEXP (op0, 1), op1)
2996 && ! side_effects_p (op1))
2997 return simplify_gen_binary (AND, mode,
2998 simplify_gen_unary (NOT, mode,
2999 XEXP (op0, 0), mode),
3000 op1);
3002 /* Similarly for (~(A ^ B)) & A. */
3003 if (GET_CODE (op0) == NOT
3004 && GET_CODE (XEXP (op0, 0)) == XOR
3005 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3006 && ! side_effects_p (op1))
3007 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3009 if (GET_CODE (op0) == NOT
3010 && GET_CODE (XEXP (op0, 0)) == XOR
3011 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3012 && ! side_effects_p (op1))
3013 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3015 /* Convert (A | B) & A to A. */
3016 if (GET_CODE (op0) == IOR
3017 && (rtx_equal_p (XEXP (op0, 0), op1)
3018 || rtx_equal_p (XEXP (op0, 1), op1))
3019 && ! side_effects_p (XEXP (op0, 0))
3020 && ! side_effects_p (XEXP (op0, 1)))
3021 return op1;
3023 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3024 ((A & N) + B) & M -> (A + B) & M
3025 Similarly if (N & M) == 0,
3026 ((A | N) + B) & M -> (A + B) & M
3027 and for - instead of + and/or ^ instead of |.
3028 Also, if (N & M) == 0, then
3029 (A +- N) & M -> A & M. */
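/* For example, with M == 0xff and N == 0xffff,
   ((A & 0xffff) + B) & 0xff becomes (A + B) & 0xff, because bits of A
   above the mask can only influence the sum through upward carries. */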
3030 if (CONST_INT_P (trueop1)
3031 && HWI_COMPUTABLE_MODE_P (mode)
3032 && ~UINTVAL (trueop1)
3033 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3034 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3036 rtx pmop[2];
3037 int which;
3039 pmop[0] = XEXP (op0, 0);
3040 pmop[1] = XEXP (op0, 1);
3042 if (CONST_INT_P (pmop[1])
3043 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3044 return simplify_gen_binary (AND, mode, pmop[0], op1);
3046 for (which = 0; which < 2; which++)
3048 tem = pmop[which];
3049 switch (GET_CODE (tem))
3051 case AND:
3052 if (CONST_INT_P (XEXP (tem, 1))
3053 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3054 == UINTVAL (trueop1))
3055 pmop[which] = XEXP (tem, 0);
3056 break;
3057 case IOR:
3058 case XOR:
3059 if (CONST_INT_P (XEXP (tem, 1))
3060 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3061 pmop[which] = XEXP (tem, 0);
3062 break;
3063 default:
3064 break;
3068 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3070 tem = simplify_gen_binary (GET_CODE (op0), mode,
3071 pmop[0], pmop[1]);
3072 return simplify_gen_binary (code, mode, tem, op1);
3076 /* (and X (ior (not X) Y)) -> (and X Y) */
3077 if (GET_CODE (op1) == IOR
3078 && GET_CODE (XEXP (op1, 0)) == NOT
3079 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3080 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3082 /* (and (ior (not X) Y) X) -> (and X Y) */
3083 if (GET_CODE (op0) == IOR
3084 && GET_CODE (XEXP (op0, 0)) == NOT
3085 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3086 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3088 /* (and X (ior Y (not X))) -> (and X Y) */
3089 if (GET_CODE (op1) == IOR
3090 && GET_CODE (XEXP (op1, 1)) == NOT
3091 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3092 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3094 /* (and (ior Y (not X)) X) -> (and X Y) */
3095 if (GET_CODE (op0) == IOR
3096 && GET_CODE (XEXP (op0, 1)) == NOT
3097 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3098 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3100 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3101 if (tem)
3102 return tem;
3104 tem = simplify_associative_operation (code, mode, op0, op1);
3105 if (tem)
3106 return tem;
3107 break;
3109 case UDIV:
3110 /* 0/x is 0 (or x&0 if x has side-effects). */
3111 if (trueop0 == CONST0_RTX (mode))
3113 if (side_effects_p (op1))
3114 return simplify_gen_binary (AND, mode, op1, trueop0);
3115 return trueop0;
3117 /* x/1 is x. */
3118 if (trueop1 == CONST1_RTX (mode))
3120 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3121 if (tem)
3122 return tem;
3124 /* Convert divide by power of two into shift. */
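/* E.g. (udiv X 16) becomes (lshiftrt X 4). */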
3125 if (CONST_INT_P (trueop1)
3126 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3127 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3128 break;
3130 case DIV:
3131 /* Handle floating point and integers separately. */
3132 if (SCALAR_FLOAT_MODE_P (mode))
3134 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3135 safe for modes with NaNs, since 0.0 / 0.0 will then be
3136 NaN rather than 0.0. Nor is it safe for modes with signed
3137 zeros, since dividing 0 by a negative number gives -0.0 */
3138 if (trueop0 == CONST0_RTX (mode)
3139 && !HONOR_NANS (mode)
3140 && !HONOR_SIGNED_ZEROS (mode)
3141 && ! side_effects_p (op1))
3142 return op0;
3143 /* x/1.0 is x. */
3144 if (trueop1 == CONST1_RTX (mode)
3145 && !HONOR_SNANS (mode))
3146 return op0;
3148 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3149 && trueop1 != CONST0_RTX (mode))
3151 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3153 /* x/-1.0 is -x. */
3154 if (real_equal (d1, &dconstm1)
3155 && !HONOR_SNANS (mode))
3156 return simplify_gen_unary (NEG, mode, op0, mode);
3158 /* Change FP division by a constant into multiplication.
3159 Only do this with -freciprocal-math. */
3160 if (flag_reciprocal_math
3161 && !real_equal (d1, &dconst0))
3163 REAL_VALUE_TYPE d;
3164 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3165 tem = const_double_from_real_value (d, mode);
3166 return simplify_gen_binary (MULT, mode, op0, tem);
3170 else if (SCALAR_INT_MODE_P (mode))
3172 /* 0/x is 0 (or x&0 if x has side-effects). */
3173 if (trueop0 == CONST0_RTX (mode)
3174 && !cfun->can_throw_non_call_exceptions)
3176 if (side_effects_p (op1))
3177 return simplify_gen_binary (AND, mode, op1, trueop0);
3178 return trueop0;
3180 /* x/1 is x. */
3181 if (trueop1 == CONST1_RTX (mode))
3183 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3184 if (tem)
3185 return tem;
3187 /* x/-1 is -x. */
3188 if (trueop1 == constm1_rtx)
3190 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3191 if (x)
3192 return simplify_gen_unary (NEG, mode, x, mode);
3195 break;
3197 case UMOD:
3198 /* 0%x is 0 (or x&0 if x has side-effects). */
3199 if (trueop0 == CONST0_RTX (mode))
3201 if (side_effects_p (op1))
3202 return simplify_gen_binary (AND, mode, op1, trueop0);
3203 return trueop0;
3205 /* x%1 is 0 (or x&0 if x has side-effects). */
3206 if (trueop1 == CONST1_RTX (mode))
3208 if (side_effects_p (op0))
3209 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3210 return CONST0_RTX (mode);
3212 /* Implement modulus by power of two as AND. */
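/* E.g. (umod X 8) becomes (and X 7). */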
3213 if (CONST_INT_P (trueop1)
3214 && exact_log2 (UINTVAL (trueop1)) > 0)
3215 return simplify_gen_binary (AND, mode, op0,
3216 gen_int_mode (INTVAL (op1) - 1, mode));
3217 break;
3219 case MOD:
3220 /* 0%x is 0 (or x&0 if x has side-effects). */
3221 if (trueop0 == CONST0_RTX (mode))
3223 if (side_effects_p (op1))
3224 return simplify_gen_binary (AND, mode, op1, trueop0);
3225 return trueop0;
3227 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3228 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3230 if (side_effects_p (op0))
3231 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3232 return CONST0_RTX (mode);
3234 break;
3236 case ROTATERT:
3237 case ROTATE:
3238 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3239 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3240 bitsize - 1, use the other direction of rotate with an amount of
3241 1 .. bitsize / 2 - 1 instead. */
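/* For example, in SImode (rotatert X 24) becomes (rotate X 8)
   and (rotate X 24) becomes (rotatert X 8). */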
3242 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3243 if (CONST_INT_P (trueop1)
3244 && IN_RANGE (INTVAL (trueop1),
3245 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3246 GET_MODE_PRECISION (mode) - 1))
3247 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3248 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3249 - INTVAL (trueop1)));
3250 #endif
3251 /* FALLTHRU */
3252 case ASHIFTRT:
3253 if (trueop1 == CONST0_RTX (mode))
3254 return op0;
3255 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3256 return op0;
3257 /* Rotating ~0 always results in ~0. */
3258 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3259 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3260 && ! side_effects_p (op1))
3261 return op0;
3262 /* Given:
3263 scalar modes M1, M2
3264 scalar constants c1, c2
3265 size (M2) > size (M1)
3266 c1 == size (M2) - size (M1)
3267 optimize:
3268 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3269 <low_part>)
3270 (const_int <c2>))
3272 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3273 <low_part>). */
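/* For example, with M2 == DImode, M1 == SImode, c1 == 32 and c2 == 3,
   (ashiftrt:SI (subreg:SI (lshiftrt:DI R 32) <low_part>) 3)
   becomes (subreg:SI (ashiftrt:DI R 35) <low_part>). */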
3274 if (code == ASHIFTRT
3275 && !VECTOR_MODE_P (mode)
3276 && SUBREG_P (op0)
3277 && CONST_INT_P (op1)
3278 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3279 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3280 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3281 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3282 > GET_MODE_BITSIZE (mode))
3283 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3284 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3285 - GET_MODE_BITSIZE (mode)))
3286 && subreg_lowpart_p (op0))
3288 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3289 + INTVAL (op1));
3290 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3291 tmp = simplify_gen_binary (ASHIFTRT,
3292 GET_MODE (SUBREG_REG (op0)),
3293 XEXP (SUBREG_REG (op0), 0),
3294 tmp);
3295 return lowpart_subreg (mode, tmp, inner_mode);
3297 canonicalize_shift:
3298 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3300 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3301 if (val != INTVAL (op1))
3302 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3304 break;
3306 case ASHIFT:
3307 case SS_ASHIFT:
3308 case US_ASHIFT:
3309 if (trueop1 == CONST0_RTX (mode))
3310 return op0;
3311 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3312 return op0;
3313 goto canonicalize_shift;
3315 case LSHIFTRT:
3316 if (trueop1 == CONST0_RTX (mode))
3317 return op0;
3318 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3319 return op0;
3320 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
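/* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for the
   operand's SImode, (lshiftrt (clz X) 5) is nonzero exactly when
   X is zero, so it becomes (eq X (const_int 0)). */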
3321 if (GET_CODE (op0) == CLZ
3322 && CONST_INT_P (trueop1)
3323 && STORE_FLAG_VALUE == 1
3324 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3326 machine_mode imode = GET_MODE (XEXP (op0, 0));
3327 unsigned HOST_WIDE_INT zero_val = 0;
3329 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3330 && zero_val == GET_MODE_PRECISION (imode)
3331 && INTVAL (trueop1) == exact_log2 (zero_val))
3332 return simplify_gen_relational (EQ, mode, imode,
3333 XEXP (op0, 0), const0_rtx);
3335 goto canonicalize_shift;
3337 case SMIN:
3338 if (width <= HOST_BITS_PER_WIDE_INT
3339 && mode_signbit_p (mode, trueop1)
3340 && ! side_effects_p (op0))
3341 return op1;
3342 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3343 return op0;
3344 tem = simplify_associative_operation (code, mode, op0, op1);
3345 if (tem)
3346 return tem;
3347 break;
3349 case SMAX:
3350 if (width <= HOST_BITS_PER_WIDE_INT
3351 && CONST_INT_P (trueop1)
3352 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3353 && ! side_effects_p (op0))
3354 return op1;
3355 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3356 return op0;
3357 tem = simplify_associative_operation (code, mode, op0, op1);
3358 if (tem)
3359 return tem;
3360 break;
3362 case UMIN:
3363 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3364 return op1;
3365 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3366 return op0;
3367 tem = simplify_associative_operation (code, mode, op0, op1);
3368 if (tem)
3369 return tem;
3370 break;
3372 case UMAX:
3373 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3374 return op1;
3375 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3376 return op0;
3377 tem = simplify_associative_operation (code, mode, op0, op1);
3378 if (tem)
3379 return tem;
3380 break;
3382 case SS_PLUS:
3383 case US_PLUS:
3384 case SS_MINUS:
3385 case US_MINUS:
3386 case SS_MULT:
3387 case US_MULT:
3388 case SS_DIV:
3389 case US_DIV:
3390 /* ??? There are simplifications that can be done. */
3391 return 0;
3393 case VEC_SELECT:
3394 if (!VECTOR_MODE_P (mode))
3396 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3397 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3398 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3399 gcc_assert (XVECLEN (trueop1, 0) == 1);
3400 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3402 if (GET_CODE (trueop0) == CONST_VECTOR)
3403 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3404 (trueop1, 0, 0)));
3406 /* Extract a scalar element from a nested VEC_SELECT expression
3407 (with an optional nested VEC_CONCAT expression). Some targets
3408 (i386) extract a scalar element from a vector using a chain of
3409 nested VEC_SELECT expressions. When the input operand is a memory
3410 operand, this operation can be simplified to a simple scalar
3411 load from an offset memory address. */
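/* For example, (vec_select:SF (vec_select:V4SF X (parallel [4 5 6 7]))
   (parallel [2])) selects element 6 of X and becomes
   (vec_select:SF X (parallel [6])). */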
3412 if (GET_CODE (trueop0) == VEC_SELECT)
3414 rtx op0 = XEXP (trueop0, 0);
3415 rtx op1 = XEXP (trueop0, 1);
3417 machine_mode opmode = GET_MODE (op0);
3418 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3419 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3421 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3422 int elem;
3424 rtvec vec;
3425 rtx tmp_op, tmp;
3427 gcc_assert (GET_CODE (op1) == PARALLEL);
3428 gcc_assert (i < n_elts);
3431 /* Select the element pointed to by the nested selector. */
3431 elem = INTVAL (XVECEXP (op1, 0, i));
3433 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3434 if (GET_CODE (op0) == VEC_CONCAT)
3436 rtx op00 = XEXP (op0, 0);
3437 rtx op01 = XEXP (op0, 1);
3439 machine_mode mode00, mode01;
3440 int n_elts00, n_elts01;
3442 mode00 = GET_MODE (op00);
3443 mode01 = GET_MODE (op01);
3445 /* Find out number of elements of each operand. */
3446 if (VECTOR_MODE_P (mode00))
3448 elt_size = GET_MODE_UNIT_SIZE (mode00);
3449 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3451 else
3452 n_elts00 = 1;
3454 if (VECTOR_MODE_P (mode01))
3456 elt_size = GET_MODE_UNIT_SIZE (mode01);
3457 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3459 else
3460 n_elts01 = 1;
3462 gcc_assert (n_elts == n_elts00 + n_elts01);
3464 /* Select correct operand of VEC_CONCAT
3465 and adjust selector. */
3466 if (elem < n_elts01)
3467 tmp_op = op00;
3468 else
3470 tmp_op = op01;
3471 elem -= n_elts00;
3474 else
3475 tmp_op = op0;
3477 vec = rtvec_alloc (1);
3478 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3480 tmp = gen_rtx_fmt_ee (code, mode,
3481 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3482 return tmp;
3484 if (GET_CODE (trueop0) == VEC_DUPLICATE
3485 && GET_MODE (XEXP (trueop0, 0)) == mode)
3486 return XEXP (trueop0, 0);
3488 else
3490 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3491 gcc_assert (GET_MODE_INNER (mode)
3492 == GET_MODE_INNER (GET_MODE (trueop0)));
3493 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3495 if (GET_CODE (trueop0) == CONST_VECTOR)
3497 int elt_size = GET_MODE_UNIT_SIZE (mode);
3498 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3499 rtvec v = rtvec_alloc (n_elts);
3500 unsigned int i;
3502 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3503 for (i = 0; i < n_elts; i++)
3505 rtx x = XVECEXP (trueop1, 0, i);
3507 gcc_assert (CONST_INT_P (x));
3508 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3509 INTVAL (x));
3512 return gen_rtx_CONST_VECTOR (mode, v);
3515 /* Recognize the identity. */
3516 if (GET_MODE (trueop0) == mode)
3518 bool maybe_ident = true;
3519 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3521 rtx j = XVECEXP (trueop1, 0, i);
3522 if (!CONST_INT_P (j) || INTVAL (j) != i)
3524 maybe_ident = false;
3525 break;
3528 if (maybe_ident)
3529 return trueop0;
3532 /* If we build {a,b} then permute it, build the result directly. */
3533 if (XVECLEN (trueop1, 0) == 2
3534 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3535 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3536 && GET_CODE (trueop0) == VEC_CONCAT
3537 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3538 && GET_MODE (XEXP (trueop0, 0)) == mode
3539 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3540 && GET_MODE (XEXP (trueop0, 1)) == mode)
3542 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3543 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3544 rtx subop0, subop1;
3546 gcc_assert (i0 < 4 && i1 < 4);
3547 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3548 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3550 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3553 if (XVECLEN (trueop1, 0) == 2
3554 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3555 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3556 && GET_CODE (trueop0) == VEC_CONCAT
3557 && GET_MODE (trueop0) == mode)
3559 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3560 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3561 rtx subop0, subop1;
3563 gcc_assert (i0 < 2 && i1 < 2);
3564 subop0 = XEXP (trueop0, i0);
3565 subop1 = XEXP (trueop0, i1);
3567 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3570 /* If we select one half of a vec_concat, return that. */
3571 if (GET_CODE (trueop0) == VEC_CONCAT
3572 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3574 rtx subop0 = XEXP (trueop0, 0);
3575 rtx subop1 = XEXP (trueop0, 1);
3576 machine_mode mode0 = GET_MODE (subop0);
3577 machine_mode mode1 = GET_MODE (subop1);
3578 int li = GET_MODE_UNIT_SIZE (mode0);
3579 int l0 = GET_MODE_SIZE (mode0) / li;
3580 int l1 = GET_MODE_SIZE (mode1) / li;
3581 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3582 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3584 bool success = true;
3585 for (int i = 1; i < l0; ++i)
3587 rtx j = XVECEXP (trueop1, 0, i);
3588 if (!CONST_INT_P (j) || INTVAL (j) != i)
3590 success = false;
3591 break;
3594 if (success)
3595 return subop0;
3597 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3599 bool success = true;
3600 for (int i = 1; i < l1; ++i)
3602 rtx j = XVECEXP (trueop1, 0, i);
3603 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3605 success = false;
3606 break;
3609 if (success)
3610 return subop1;
3615 if (XVECLEN (trueop1, 0) == 1
3616 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3617 && GET_CODE (trueop0) == VEC_CONCAT)
3619 rtx vec = trueop0;
3620 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3622 /* Try to find the element in the VEC_CONCAT. */
3623 while (GET_MODE (vec) != mode
3624 && GET_CODE (vec) == VEC_CONCAT)
3626 HOST_WIDE_INT vec_size;
3628 if (CONST_INT_P (XEXP (vec, 0)))
3630 /* vec_concat of two const_ints doesn't make sense with
3631 respect to modes. */
3632 if (CONST_INT_P (XEXP (vec, 1)))
3633 return 0;
3635 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3636 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3638 else
3639 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3641 if (offset < vec_size)
3642 vec = XEXP (vec, 0);
3643 else
3645 offset -= vec_size;
3646 vec = XEXP (vec, 1);
3648 vec = avoid_constant_pool_reference (vec);
3651 if (GET_MODE (vec) == mode)
3652 return vec;
3655 /* If we select elements in a vec_merge that all come from the same
3656 operand, select from that operand directly. */
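/* E.g. in (vec_select (vec_merge X Y (const_int 5)) (parallel [0 2]))
   lanes 0 and 2 are both taken from X by the merge mask, so this
   becomes (vec_select X (parallel [0 2])).  */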
3657 if (GET_CODE (op0) == VEC_MERGE)
3659 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3660 if (CONST_INT_P (trueop02))
3662 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3663 bool all_operand0 = true;
3664 bool all_operand1 = true;
3665 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3667 rtx j = XVECEXP (trueop1, 0, i);
3668 if (sel & (1 << UINTVAL (j)))
3669 all_operand1 = false;
3670 else
3671 all_operand0 = false;
3673 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3674 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3675 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3676 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3680 /* If we have two nested selects that are inverses of each
3681 other, replace them with the source operand. */
3682 if (GET_CODE (trueop0) == VEC_SELECT
3683 && GET_MODE (XEXP (trueop0, 0)) == mode)
3685 rtx op0_subop1 = XEXP (trueop0, 1);
3686 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3687 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3689 /* Apply the outer ordering vector to the inner one. (The inner
3690 ordering vector is expressly permitted to be of a different
3691 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3692 then the two VEC_SELECTs cancel. */
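/* E.g. if the inner select reorders X with (parallel [1 0]) and the
   outer select also uses (parallel [1 0]), the composition is the
   identity [0 1], so the result is simply X.  */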
3693 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3695 rtx x = XVECEXP (trueop1, 0, i);
3696 if (!CONST_INT_P (x))
3697 return 0;
3698 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3699 if (!CONST_INT_P (y) || i != INTVAL (y))
3700 return 0;
3702 return XEXP (trueop0, 0);
3705 return 0;
3706 case VEC_CONCAT:
3708 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3709 ? GET_MODE (trueop0)
3710 : GET_MODE_INNER (mode));
3711 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3712 ? GET_MODE (trueop1)
3713 : GET_MODE_INNER (mode));
3715 gcc_assert (VECTOR_MODE_P (mode));
3716 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3717 == GET_MODE_SIZE (mode));
3719 if (VECTOR_MODE_P (op0_mode))
3720 gcc_assert (GET_MODE_INNER (mode)
3721 == GET_MODE_INNER (op0_mode));
3722 else
3723 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3725 if (VECTOR_MODE_P (op1_mode))
3726 gcc_assert (GET_MODE_INNER (mode)
3727 == GET_MODE_INNER (op1_mode));
3728 else
3729 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3731 if ((GET_CODE (trueop0) == CONST_VECTOR
3732 || CONST_SCALAR_INT_P (trueop0)
3733 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3734 && (GET_CODE (trueop1) == CONST_VECTOR
3735 || CONST_SCALAR_INT_P (trueop1)
3736 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3738 int elt_size = GET_MODE_UNIT_SIZE (mode);
3739 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3740 rtvec v = rtvec_alloc (n_elts);
3741 unsigned int i;
3742 unsigned in_n_elts = 1;
3744 if (VECTOR_MODE_P (op0_mode))
3745 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3746 for (i = 0; i < n_elts; i++)
3748 if (i < in_n_elts)
3750 if (!VECTOR_MODE_P (op0_mode))
3751 RTVEC_ELT (v, i) = trueop0;
3752 else
3753 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3755 else
3757 if (!VECTOR_MODE_P (op1_mode))
3758 RTVEC_ELT (v, i) = trueop1;
3759 else
3760 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3761 i - in_n_elts);
3765 return gen_rtx_CONST_VECTOR (mode, v);
3768 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3769 Restrict the transformation to avoid generating a VEC_SELECT with a
3770 mode unrelated to its operand. */
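/* E.g. (vec_concat (vec_select X:V2SI (parallel [1]))
                    (vec_select X:V2SI (parallel [0])))
   becomes (vec_select X (parallel [1 0])), a single element swap.  */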
3771 if (GET_CODE (trueop0) == VEC_SELECT
3772 && GET_CODE (trueop1) == VEC_SELECT
3773 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3774 && GET_MODE (XEXP (trueop0, 0)) == mode)
3776 rtx par0 = XEXP (trueop0, 1);
3777 rtx par1 = XEXP (trueop1, 1);
3778 int len0 = XVECLEN (par0, 0);
3779 int len1 = XVECLEN (par1, 0);
3780 rtvec vec = rtvec_alloc (len0 + len1);
3781 for (int i = 0; i < len0; i++)
3782 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3783 for (int i = 0; i < len1; i++)
3784 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3785 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3786 gen_rtx_PARALLEL (VOIDmode, vec));
3789 return 0;
3791 default:
3792 gcc_unreachable ();
3795 return 0;
3799 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3800 rtx op0, rtx op1)
3802 unsigned int width = GET_MODE_PRECISION (mode);
3804 if (VECTOR_MODE_P (mode)
3805 && code != VEC_CONCAT
3806 && GET_CODE (op0) == CONST_VECTOR
3807 && GET_CODE (op1) == CONST_VECTOR)
3809 unsigned n_elts = GET_MODE_NUNITS (mode);
3810 machine_mode op0mode = GET_MODE (op0);
3811 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3812 machine_mode op1mode = GET_MODE (op1);
3813 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3814 rtvec v = rtvec_alloc (n_elts);
3815 unsigned int i;
3817 gcc_assert (op0_n_elts == n_elts);
3818 gcc_assert (op1_n_elts == n_elts);
3819 for (i = 0; i < n_elts; i++)
3821 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3822 CONST_VECTOR_ELT (op0, i),
3823 CONST_VECTOR_ELT (op1, i));
3824 if (!x)
3825 return 0;
3826 RTVEC_ELT (v, i) = x;
3829 return gen_rtx_CONST_VECTOR (mode, v);
3832 if (VECTOR_MODE_P (mode)
3833 && code == VEC_CONCAT
3834 && (CONST_SCALAR_INT_P (op0)
3835 || GET_CODE (op0) == CONST_FIXED
3836 || CONST_DOUBLE_AS_FLOAT_P (op0))
3837 && (CONST_SCALAR_INT_P (op1)
3838 || CONST_DOUBLE_AS_FLOAT_P (op1)
3839 || GET_CODE (op1) == CONST_FIXED))
3841 unsigned n_elts = GET_MODE_NUNITS (mode);
3842 rtvec v = rtvec_alloc (n_elts);
3844 gcc_assert (n_elts >= 2);
3845 if (n_elts == 2)
3847 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3848 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3850 RTVEC_ELT (v, 0) = op0;
3851 RTVEC_ELT (v, 1) = op1;
3853 else
3855 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3856 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3857 unsigned i;
3859 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3860 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3861 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3863 for (i = 0; i < op0_n_elts; ++i)
3864 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3865 for (i = 0; i < op1_n_elts; ++i)
3866 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3869 return gen_rtx_CONST_VECTOR (mode, v);
3872 if (SCALAR_FLOAT_MODE_P (mode)
3873 && CONST_DOUBLE_AS_FLOAT_P (op0)
3874 && CONST_DOUBLE_AS_FLOAT_P (op1)
3875 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3877 if (code == AND
3878 || code == IOR
3879 || code == XOR)
3881 long tmp0[4];
3882 long tmp1[4];
3883 REAL_VALUE_TYPE r;
3884 int i;
3886 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3887 GET_MODE (op0));
3888 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3889 GET_MODE (op1));
3890 for (i = 0; i < 4; i++)
3892 switch (code)
3894 case AND:
3895 tmp0[i] &= tmp1[i];
3896 break;
3897 case IOR:
3898 tmp0[i] |= tmp1[i];
3899 break;
3900 case XOR:
3901 tmp0[i] ^= tmp1[i];
3902 break;
3903 default:
3904 gcc_unreachable ();
3907 real_from_target (&r, tmp0, mode);
3908 return const_double_from_real_value (r, mode);
3910 else
3912 REAL_VALUE_TYPE f0, f1, value, result;
3913 const REAL_VALUE_TYPE *opr0, *opr1;
3914 bool inexact;
3916 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3917 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3919 if (HONOR_SNANS (mode)
3920 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3921 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3922 return 0;
3924 real_convert (&f0, mode, opr0);
3925 real_convert (&f1, mode, opr1);
3927 if (code == DIV
3928 && real_equal (&f1, &dconst0)
3929 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3930 return 0;
3932 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3933 && flag_trapping_math
3934 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3936 int s0 = REAL_VALUE_NEGATIVE (f0);
3937 int s1 = REAL_VALUE_NEGATIVE (f1);
3939 switch (code)
3941 case PLUS:
3942 /* Inf + -Inf = NaN plus exception. */
3943 if (s0 != s1)
3944 return 0;
3945 break;
3946 case MINUS:
3947 /* Inf - Inf = NaN plus exception. */
3948 if (s0 == s1)
3949 return 0;
3950 break;
3951 case DIV:
3952 /* Inf / Inf = NaN plus exception. */
3953 return 0;
3954 default:
3955 break;
3959 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3960 && flag_trapping_math
3961 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3962 || (REAL_VALUE_ISINF (f1)
3963 && real_equal (&f0, &dconst0))))
3964 /* Inf * 0 = NaN plus exception. */
3965 return 0;
3967 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3968 &f0, &f1);
3969 real_convert (&result, mode, &value);
3971 /* Don't constant fold this floating point operation if
3972 the result has overflowed and flag_trapping_math is set. */
3974 if (flag_trapping_math
3975 && MODE_HAS_INFINITIES (mode)
3976 && REAL_VALUE_ISINF (result)
3977 && !REAL_VALUE_ISINF (f0)
3978 && !REAL_VALUE_ISINF (f1))
3979 /* Overflow plus exception. */
3980 return 0;
3982 /* Don't constant fold this floating point operation if the
3983 result may depend upon the run-time rounding mode and
3984 flag_rounding_math is set, or if GCC's software emulation
3985 is unable to accurately represent the result. */
3987 if ((flag_rounding_math
3988 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3989 && (inexact || !real_identical (&result, &value)))
3990 return NULL_RTX;
3992 return const_double_from_real_value (result, mode);
3996 /* We can fold some multi-word operations. */
3997 if ((GET_MODE_CLASS (mode) == MODE_INT
3998 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3999 && CONST_SCALAR_INT_P (op0)
4000 && CONST_SCALAR_INT_P (op1))
4002 wide_int result;
4003 bool overflow;
4004 rtx_mode_t pop0 = std::make_pair (op0, mode);
4005 rtx_mode_t pop1 = std::make_pair (op1, mode);
4007 #if TARGET_SUPPORTS_WIDE_INT == 0
4008 /* This assert keeps the simplification from producing a result
4009 that cannot be represented in a CONST_DOUBLE, but a lot of
4010 upstream callers expect that this function never fails to
4011 simplify something, and so if you added this to the test
4012 above, the code would die later anyway. If this assert
4013 happens, you just need to make the port support wide int. */
4014 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4015 #endif
4016 switch (code)
4018 case MINUS:
4019 result = wi::sub (pop0, pop1);
4020 break;
4022 case PLUS:
4023 result = wi::add (pop0, pop1);
4024 break;
4026 case MULT:
4027 result = wi::mul (pop0, pop1);
4028 break;
4030 case DIV:
4031 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4032 if (overflow)
4033 return NULL_RTX;
4034 break;
4036 case MOD:
4037 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4038 if (overflow)
4039 return NULL_RTX;
4040 break;
4042 case UDIV:
4043 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4044 if (overflow)
4045 return NULL_RTX;
4046 break;
4048 case UMOD:
4049 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4050 if (overflow)
4051 return NULL_RTX;
4052 break;
4054 case AND:
4055 result = wi::bit_and (pop0, pop1);
4056 break;
4058 case IOR:
4059 result = wi::bit_or (pop0, pop1);
4060 break;
4062 case XOR:
4063 result = wi::bit_xor (pop0, pop1);
4064 break;
4066 case SMIN:
4067 result = wi::smin (pop0, pop1);
4068 break;
4070 case SMAX:
4071 result = wi::smax (pop0, pop1);
4072 break;
4074 case UMIN:
4075 result = wi::umin (pop0, pop1);
4076 break;
4078 case UMAX:
4079 result = wi::umax (pop0, pop1);
4080 break;
4082 case LSHIFTRT:
4083 case ASHIFTRT:
4084 case ASHIFT:
4086 wide_int wop1 = pop1;
4087 if (SHIFT_COUNT_TRUNCATED)
4088 wop1 = wi::umod_trunc (wop1, width);
4089 else if (wi::geu_p (wop1, width))
4090 return NULL_RTX;
4092 switch (code)
4094 case LSHIFTRT:
4095 result = wi::lrshift (pop0, wop1);
4096 break;
4098 case ASHIFTRT:
4099 result = wi::arshift (pop0, wop1);
4100 break;
4102 case ASHIFT:
4103 result = wi::lshift (pop0, wop1);
4104 break;
4106 default:
4107 gcc_unreachable ();
4109 break;
4111 case ROTATE:
4112 case ROTATERT:
4114 if (wi::neg_p (pop1))
4115 return NULL_RTX;
4117 switch (code)
4119 case ROTATE:
4120 result = wi::lrotate (pop0, pop1);
4121 break;
4123 case ROTATERT:
4124 result = wi::rrotate (pop0, pop1);
4125 break;
4127 default:
4128 gcc_unreachable ();
4130 break;
4132 default:
4133 return NULL_RTX;
4135 return immed_wide_int_const (result, mode);
4138 return NULL_RTX;
4143 /* Return a positive integer if X should sort after Y. The value
4144 returned is 1 if and only if X and Y are both regs. */
4146 static int
4147 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4149 int result;
4151 result = (commutative_operand_precedence (y)
4152 - commutative_operand_precedence (x));
4153 if (result)
4154 return result + result;
4156 /* Group together equal REGs to do more simplification. */
4157 if (REG_P (x) && REG_P (y))
4158 return REGNO (x) > REGNO (y);
4160 return 0;
4163 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4164 operands may be another PLUS or MINUS.
4166 Rather than test for specific cases, we do this by a brute-force method
4167 and do all possible simplifications until no more changes occur. Then
4168 we rebuild the operation.
4170 May return NULL_RTX when no changes were made. */
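/* For example, given code == MINUS, op0 == (plus A B) and op1 == A
   (for register operands A and B), the operands expand into the array
   { {A, +}, {A, -}, {B, +} }; the combination loop folds A and -A to
   zero, then B + 0 back to B, and the routine returns just B.  */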
4172 static rtx
4173 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4174 rtx op1)
4176 struct simplify_plus_minus_op_data
4178 rtx op;
4179 short neg;
4180 } ops[16];
4181 rtx result, tem;
4182 int n_ops = 2;
4183 int changed, n_constants, canonicalized = 0;
4184 int i, j;
4186 memset (ops, 0, sizeof ops);
4188 /* Set up the two operands and then expand them until nothing has been
4189 changed. If we run out of room in our array, give up; this should
4190 almost never happen. */
4192 ops[0].op = op0;
4193 ops[0].neg = 0;
4194 ops[1].op = op1;
4195 ops[1].neg = (code == MINUS);
4199 changed = 0;
4200 n_constants = 0;
4202 for (i = 0; i < n_ops; i++)
4204 rtx this_op = ops[i].op;
4205 int this_neg = ops[i].neg;
4206 enum rtx_code this_code = GET_CODE (this_op);
4208 switch (this_code)
4210 case PLUS:
4211 case MINUS:
4212 if (n_ops == ARRAY_SIZE (ops))
4213 return NULL_RTX;
4215 ops[n_ops].op = XEXP (this_op, 1);
4216 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4217 n_ops++;
4219 ops[i].op = XEXP (this_op, 0);
4220 changed = 1;
4221 /* If this operand was negated then we will potentially
4222 canonicalize the expression. Similarly, if we don't
4223 place the operands adjacent, we're re-ordering the
4224 expression and thus might be performing a
4225 canonicalization. Ignore register re-ordering.
4226 ??? It might be better to shuffle the ops array here,
4227 but then (plus (plus (A, B), plus (C, D))) wouldn't
4228 be seen as non-canonical. */
4229 if (this_neg
4230 || (i != n_ops - 2
4231 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4232 canonicalized = 1;
4233 break;
4235 case NEG:
4236 ops[i].op = XEXP (this_op, 0);
4237 ops[i].neg = ! this_neg;
4238 changed = 1;
4239 canonicalized = 1;
4240 break;
4242 case CONST:
4243 if (n_ops != ARRAY_SIZE (ops)
4244 && GET_CODE (XEXP (this_op, 0)) == PLUS
4245 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4246 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4248 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4249 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4250 ops[n_ops].neg = this_neg;
4251 n_ops++;
4252 changed = 1;
4253 canonicalized = 1;
4255 break;
4257 case NOT:
4258 /* ~a -> (-a - 1) */
4259 if (n_ops != ARRAY_SIZE (ops))
4261 ops[n_ops].op = CONSTM1_RTX (mode);
4262 ops[n_ops++].neg = this_neg;
4263 ops[i].op = XEXP (this_op, 0);
4264 ops[i].neg = !this_neg;
4265 changed = 1;
4266 canonicalized = 1;
4268 break;
4270 case CONST_INT:
4271 n_constants++;
4272 if (this_neg)
4274 ops[i].op = neg_const_int (mode, this_op);
4275 ops[i].neg = 0;
4276 changed = 1;
4277 canonicalized = 1;
4279 break;
4281 default:
4282 break;
4286 while (changed);
4288 if (n_constants > 1)
4289 canonicalized = 1;
4291 gcc_assert (n_ops >= 2);
4293 /* If we only have two operands, we can avoid the loops. */
4294 if (n_ops == 2)
4296 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4297 rtx lhs, rhs;
4299 /* Get the two operands. Be careful with the order, especially for
4300 the cases where code == MINUS. */
4301 if (ops[0].neg && ops[1].neg)
4303 lhs = gen_rtx_NEG (mode, ops[0].op);
4304 rhs = ops[1].op;
4306 else if (ops[0].neg)
4308 lhs = ops[1].op;
4309 rhs = ops[0].op;
4311 else
4313 lhs = ops[0].op;
4314 rhs = ops[1].op;
4317 return simplify_const_binary_operation (code, mode, lhs, rhs);
4320 /* Now simplify each pair of operands until nothing changes. */
4321 while (1)
4323 /* Insertion sort is good enough for a small array. */
4324 for (i = 1; i < n_ops; i++)
4326 struct simplify_plus_minus_op_data save;
4327 int cmp;
4329 j = i - 1;
4330 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4331 if (cmp <= 0)
4332 continue;
4333 /* Just swapping registers doesn't count as canonicalization. */
4334 if (cmp != 1)
4335 canonicalized = 1;
4337 save = ops[i];
4339 ops[j + 1] = ops[j];
4340 while (j--
4341 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4342 ops[j + 1] = save;
4345 changed = 0;
4346 for (i = n_ops - 1; i > 0; i--)
4347 for (j = i - 1; j >= 0; j--)
4349 rtx lhs = ops[j].op, rhs = ops[i].op;
4350 int lneg = ops[j].neg, rneg = ops[i].neg;
4352 if (lhs != 0 && rhs != 0)
4354 enum rtx_code ncode = PLUS;
4356 if (lneg != rneg)
4358 ncode = MINUS;
4359 if (lneg)
4360 std::swap (lhs, rhs);
4362 else if (swap_commutative_operands_p (lhs, rhs))
4363 std::swap (lhs, rhs);
4365 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4366 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4368 rtx tem_lhs, tem_rhs;
4370 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4371 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4372 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4373 tem_rhs);
4375 if (tem && !CONSTANT_P (tem))
4376 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4378 else
4379 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4381 if (tem)
4383 /* Reject "simplifications" that just wrap the two
4384 arguments in a CONST. Failure to do so can result
4385 in infinite recursion with simplify_binary_operation
4386 when it calls us to simplify CONST operations.
4387 Also, if we find such a simplification, don't try
4388 any more combinations with this rhs: We must have
4389 something like symbol+offset, i.e. one of the
4390 trivial CONST expressions we handle later. */
4391 if (GET_CODE (tem) == CONST
4392 && GET_CODE (XEXP (tem, 0)) == ncode
4393 && XEXP (XEXP (tem, 0), 0) == lhs
4394 && XEXP (XEXP (tem, 0), 1) == rhs)
4395 break;
4396 lneg &= rneg;
4397 if (GET_CODE (tem) == NEG)
4398 tem = XEXP (tem, 0), lneg = !lneg;
4399 if (CONST_INT_P (tem) && lneg)
4400 tem = neg_const_int (mode, tem), lneg = 0;
4402 ops[i].op = tem;
4403 ops[i].neg = lneg;
4404 ops[j].op = NULL_RTX;
4405 changed = 1;
4406 canonicalized = 1;
4411 if (!changed)
4412 break;
4414 /* Pack all the operands to the lower-numbered entries. */
4415 for (i = 0, j = 0; j < n_ops; j++)
4416 if (ops[j].op)
4418 ops[i] = ops[j];
4419 i++;
4421 n_ops = i;
4424 /* If nothing changed, check that rematerialization of rtl instructions
4425 is still required. */
4426 if (!canonicalized)
4428 /* Perform rematerialization only if all operands are registers and
4429 all operations are PLUS. */
4430 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4431 around rs6000 and how it uses the CA register. See PR67145. */
4432 for (i = 0; i < n_ops; i++)
4433 if (ops[i].neg
4434 || !REG_P (ops[i].op)
4435 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4436 && fixed_regs[REGNO (ops[i].op)]
4437 && !global_regs[REGNO (ops[i].op)]
4438 && ops[i].op != frame_pointer_rtx
4439 && ops[i].op != arg_pointer_rtx
4440 && ops[i].op != stack_pointer_rtx))
4441 return NULL_RTX;
4442 goto gen_result;
4445 /* Create (minus -C X) instead of (neg (const (plus X C))). */
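/* E.g. when the two remaining entries are a negated (symbol_ref S) and
   (const_int -8), this emits (minus (const_int -8) (symbol_ref S))
   rather than wrapping the negation in a CONST expression.  */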
4446 if (n_ops == 2
4447 && CONST_INT_P (ops[1].op)
4448 && CONSTANT_P (ops[0].op)
4449 && ops[0].neg)
4450 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4452 /* We suppressed creation of trivial CONST expressions in the
4453 combination loop to avoid recursion. Create one manually now.
4454 The combination loop should have ensured that there is exactly
4455 one CONST_INT, and the sort will have ensured that it is last
4456 in the array and that any other constant will be next-to-last. */
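/* E.g. a trailing pair { (symbol_ref S), (const_int 12) } is folded
   here into the single operand (const (plus S 12)).  */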
4458 if (n_ops > 1
4459 && CONST_INT_P (ops[n_ops - 1].op)
4460 && CONSTANT_P (ops[n_ops - 2].op))
4462 rtx value = ops[n_ops - 1].op;
4463 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4464 value = neg_const_int (mode, value);
4465 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4466 INTVAL (value));
4467 n_ops--;
4470 /* Put a non-negated operand first, if possible. */
4472 for (i = 0; i < n_ops && ops[i].neg; i++)
4473 continue;
4474 if (i == n_ops)
4475 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4476 else if (i != 0)
4478 tem = ops[0].op;
4479 ops[0] = ops[i];
4480 ops[i].op = tem;
4481 ops[i].neg = 1;
4484 /* Now make the result by performing the requested operations. */
4485 gen_result:
4486 result = ops[0].op;
4487 for (i = 1; i < n_ops; i++)
4488 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4489 mode, result, ops[i].op);
4491 return result;
4494 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4495 static bool
4496 plus_minus_operand_p (const_rtx x)
4498 return GET_CODE (x) == PLUS
4499 || GET_CODE (x) == MINUS
4500 || (GET_CODE (x) == CONST
4501 && GET_CODE (XEXP (x, 0)) == PLUS
4502 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4503 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4506 /* Like simplify_binary_operation except used for relational operators.
4507 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4508 not both be VOIDmode.
4510 CMP_MODE specifies the mode in which the comparison is done, so it is
4511 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4512 the operands or, if both are VOIDmode, the operands are compared in
4513 "infinite precision". */
4515 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4516 machine_mode cmp_mode, rtx op0, rtx op1)
4518 rtx tem, trueop0, trueop1;
4520 if (cmp_mode == VOIDmode)
4521 cmp_mode = GET_MODE (op0);
4522 if (cmp_mode == VOIDmode)
4523 cmp_mode = GET_MODE (op1);
4525 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4526 if (tem)
4528 if (SCALAR_FLOAT_MODE_P (mode))
4530 if (tem == const0_rtx)
4531 return CONST0_RTX (mode);
4532 #ifdef FLOAT_STORE_FLAG_VALUE
4534 REAL_VALUE_TYPE val;
4535 val = FLOAT_STORE_FLAG_VALUE (mode);
4536 return const_double_from_real_value (val, mode);
4538 #else
4539 return NULL_RTX;
4540 #endif
4542 if (VECTOR_MODE_P (mode))
4544 if (tem == const0_rtx)
4545 return CONST0_RTX (mode);
4546 #ifdef VECTOR_STORE_FLAG_VALUE
4548 int i, units;
4549 rtvec v;
4551 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4552 if (val == NULL_RTX)
4553 return NULL_RTX;
4554 if (val == const1_rtx)
4555 return CONST1_RTX (mode);
4557 units = GET_MODE_NUNITS (mode);
4558 v = rtvec_alloc (units);
4559 for (i = 0; i < units; i++)
4560 RTVEC_ELT (v, i) = val;
4561 return gen_rtx_raw_CONST_VECTOR (mode, v);
4563 #else
4564 return NULL_RTX;
4565 #endif
4568 return tem;
4571 /* For the following tests, ensure const0_rtx is op1. */
4572 if (swap_commutative_operands_p (op0, op1)
4573 || (op0 == const0_rtx && op1 != const0_rtx))
4574 std::swap (op0, op1), code = swap_condition (code);
4576 /* If op0 is a compare, extract the comparison arguments from it. */
4577 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4578 return simplify_gen_relational (code, mode, VOIDmode,
4579 XEXP (op0, 0), XEXP (op0, 1));
4581 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4582 || CC0_P (op0))
4583 return NULL_RTX;
4585 trueop0 = avoid_constant_pool_reference (op0);
4586 trueop1 = avoid_constant_pool_reference (op1);
4587 return simplify_relational_operation_1 (code, mode, cmp_mode,
4588 trueop0, trueop1);
4591 /* This part of simplify_relational_operation is only used when CMP_MODE
4592 is not in class MODE_CC (i.e. it is a real comparison).
4594 MODE is the mode of the result, while CMP_MODE specifies the mode
4595 in which the comparison is done, so it is the mode of the operands. */
4597 static rtx
4598 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4599 machine_mode cmp_mode, rtx op0, rtx op1)
4601 enum rtx_code op0code = GET_CODE (op0);
4603 if (op1 == const0_rtx && COMPARISON_P (op0))
4605 /* If op0 is a comparison, extract the comparison arguments
4606 from it. */
4607 if (code == NE)
4609 if (GET_MODE (op0) == mode)
4610 return simplify_rtx (op0);
4611 else
4612 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4613 XEXP (op0, 0), XEXP (op0, 1));
4615 else if (code == EQ)
4617 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4618 if (new_code != UNKNOWN)
4619 return simplify_gen_relational (new_code, mode, VOIDmode,
4620 XEXP (op0, 0), XEXP (op0, 1));
4624 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4625 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
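/* E.g. (ltu (plus X (const_int 4)) (const_int 4)) becomes
   (geu X (const_int -4)); both are true exactly when X + 4 wraps
   around.  */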
4626 if ((code == LTU || code == GEU)
4627 && GET_CODE (op0) == PLUS
4628 && CONST_INT_P (XEXP (op0, 1))
4629 && (rtx_equal_p (op1, XEXP (op0, 0))
4630 || rtx_equal_p (op1, XEXP (op0, 1)))
4631 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4632 && XEXP (op0, 1) != const0_rtx)
4634 rtx new_cmp
4635 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4636 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4637 cmp_mode, XEXP (op0, 0), new_cmp);
4640 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4641 if ((code == LTU || code == GEU)
4642 && GET_CODE (op0) == PLUS
4643 && rtx_equal_p (op1, XEXP (op0, 1))
4644 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4645 && !rtx_equal_p (op1, XEXP (op0, 0)))
4646 return simplify_gen_relational (code, mode, cmp_mode, op0,
4647 copy_rtx (XEXP (op0, 0)));
4649 if (op1 == const0_rtx)
4651 /* Canonicalize (GTU x 0) as (NE x 0). */
4652 if (code == GTU)
4653 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4654 /* Canonicalize (LEU x 0) as (EQ x 0). */
4655 if (code == LEU)
4656 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4658 else if (op1 == const1_rtx)
4660 switch (code)
4662 case GE:
4663 /* Canonicalize (GE x 1) as (GT x 0). */
4664 return simplify_gen_relational (GT, mode, cmp_mode,
4665 op0, const0_rtx);
4666 case GEU:
4667 /* Canonicalize (GEU x 1) as (NE x 0). */
4668 return simplify_gen_relational (NE, mode, cmp_mode,
4669 op0, const0_rtx);
4670 case LT:
4671 /* Canonicalize (LT x 1) as (LE x 0). */
4672 return simplify_gen_relational (LE, mode, cmp_mode,
4673 op0, const0_rtx);
4674 case LTU:
4675 /* Canonicalize (LTU x 1) as (EQ x 0). */
4676 return simplify_gen_relational (EQ, mode, cmp_mode,
4677 op0, const0_rtx);
4678 default:
4679 break;
4682 else if (op1 == constm1_rtx)
4684 /* Canonicalize (LE x -1) as (LT x 0). */
4685 if (code == LE)
4686 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4687 /* Canonicalize (GT x -1) as (GE x 0). */
4688 if (code == GT)
4689 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4692 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4693 if ((code == EQ || code == NE)
4694 && (op0code == PLUS || op0code == MINUS)
4695 && CONSTANT_P (op1)
4696 && CONSTANT_P (XEXP (op0, 1))
4697 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4699 rtx x = XEXP (op0, 0);
4700 rtx c = XEXP (op0, 1);
4701 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4702 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4704 /* Detect an infinite recursive condition, where we oscillate at this
4705 simplification case between:
4706 A + B == C <---> C - B == A,
4707 where A, B, and C are all constants with non-simplifiable expressions,
4708 usually SYMBOL_REFs. */
4709 if (GET_CODE (tem) == invcode
4710 && CONSTANT_P (x)
4711 && rtx_equal_p (c, XEXP (tem, 1)))
4712 return NULL_RTX;
4714 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4717 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4718 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4719 if (code == NE
4720 && op1 == const0_rtx
4721 && GET_MODE_CLASS (mode) == MODE_INT
4722 && cmp_mode != VOIDmode
4723 /* ??? Work-around BImode bugs in the ia64 backend. */
4724 && mode != BImode
4725 && cmp_mode != BImode
4726 && nonzero_bits (op0, cmp_mode) == 1
4727 && STORE_FLAG_VALUE == 1)
4728 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4729 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4730 : lowpart_subreg (mode, op0, cmp_mode);
4732 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4733 if ((code == EQ || code == NE)
4734 && op1 == const0_rtx
4735 && op0code == XOR)
4736 return simplify_gen_relational (code, mode, cmp_mode,
4737 XEXP (op0, 0), XEXP (op0, 1));
4739 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4740 if ((code == EQ || code == NE)
4741 && op0code == XOR
4742 && rtx_equal_p (XEXP (op0, 0), op1)
4743 && !side_effects_p (XEXP (op0, 0)))
4744 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4745 CONST0_RTX (mode));
4747 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4748 if ((code == EQ || code == NE)
4749 && op0code == XOR
4750 && rtx_equal_p (XEXP (op0, 1), op1)
4751 && !side_effects_p (XEXP (op0, 1)))
4752 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4753 CONST0_RTX (mode));
4755 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4756 if ((code == EQ || code == NE)
4757 && op0code == XOR
4758 && CONST_SCALAR_INT_P (op1)
4759 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4760 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4761 simplify_gen_binary (XOR, cmp_mode,
4762 XEXP (op0, 1), op1));
4764 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4765 can be implemented with a BICS instruction on some targets, or
4766 constant-folded if y is a constant. */
4767 if ((code == EQ || code == NE)
4768 && op0code == AND
4769 && rtx_equal_p (XEXP (op0, 0), op1)
4770 && !side_effects_p (op1)
4771 && op1 != CONST0_RTX (cmp_mode))
4773 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4774 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4776 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4777 CONST0_RTX (cmp_mode));
4780 /* Likewise for (eq/ne (and x y) y). */
4781 if ((code == EQ || code == NE)
4782 && op0code == AND
4783 && rtx_equal_p (XEXP (op0, 1), op1)
4784 && !side_effects_p (op1)
4785 && op1 != CONST0_RTX (cmp_mode))
4787 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4788 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4790 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4791 CONST0_RTX (cmp_mode));
4794 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4795 if ((code == EQ || code == NE)
4796 && GET_CODE (op0) == BSWAP
4797 && CONST_SCALAR_INT_P (op1))
4798 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4799 simplify_gen_unary (BSWAP, cmp_mode,
4800 op1, cmp_mode));
4802 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4803 if ((code == EQ || code == NE)
4804 && GET_CODE (op0) == BSWAP
4805 && GET_CODE (op1) == BSWAP)
4806 return simplify_gen_relational (code, mode, cmp_mode,
4807 XEXP (op0, 0), XEXP (op1, 0));
4809 if (op0code == POPCOUNT && op1 == const0_rtx)
4810 switch (code)
4812 case EQ:
4813 case LE:
4814 case LEU:
4815 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4816 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4817 XEXP (op0, 0), const0_rtx);
4819 case NE:
4820 case GT:
4821 case GTU:
4822 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4823 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4824 XEXP (op0, 0), const0_rtx);
4826 default:
4827 break;
4830 return NULL_RTX;
4833 enum
4835 CMP_EQ = 1,
4836 CMP_LT = 2,
4837 CMP_GT = 4,
4838 CMP_LTU = 8,
4839 CMP_GTU = 16
4843 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4844 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4845 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4846 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4847 For floating-point comparisons, assume that the operands were ordered. */
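/* E.g. KNOWN_RESULTS == (CMP_LT | CMP_GTU) describes operands such as
   -1 and 1 that compare "less than" signed but "greater than" unsigned;
   for code LT this yields const_true_rtx and for code LTU const0_rtx.  */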
4849 static rtx
4850 comparison_result (enum rtx_code code, int known_results)
4852 switch (code)
4854 case EQ:
4855 case UNEQ:
4856 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4857 case NE:
4858 case LTGT:
4859 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4861 case LT:
4862 case UNLT:
4863 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4864 case GE:
4865 case UNGE:
4866 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4868 case GT:
4869 case UNGT:
4870 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4871 case LE:
4872 case UNLE:
4873 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4875 case LTU:
4876 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4877 case GEU:
4878 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4880 case GTU:
4881 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4882 case LEU:
4883 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4885 case ORDERED:
4886 return const_true_rtx;
4887 case UNORDERED:
4888 return const0_rtx;
4889 default:
4890 gcc_unreachable ();
4894 /* Check if the given comparison (done in the given MODE) is actually
4895 a tautology or a contradiction. If the mode is VOIDmode, the
4896 comparison is done in "infinite precision". If no simplification
4897 is possible, this function returns zero. Otherwise, it returns
4898 either const_true_rtx or const0_rtx. */
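/* For example, comparing (const_int 3) against (const_int 7) with code
   LT yields const_true_rtx, while comparing (const_int -1) against
   (const_int 1) with code LTU yields const0_rtx, because -1 is the
   largest unsigned value.  */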
4901 simplify_const_relational_operation (enum rtx_code code,
4902 machine_mode mode,
4903 rtx op0, rtx op1)
4905 rtx tem;
4906 rtx trueop0;
4907 rtx trueop1;
4909 gcc_assert (mode != VOIDmode
4910 || (GET_MODE (op0) == VOIDmode
4911 && GET_MODE (op1) == VOIDmode));
4913 /* If op0 is a compare, extract the comparison arguments from it. */
4914 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4916 op1 = XEXP (op0, 1);
4917 op0 = XEXP (op0, 0);
4919 if (GET_MODE (op0) != VOIDmode)
4920 mode = GET_MODE (op0);
4921 else if (GET_MODE (op1) != VOIDmode)
4922 mode = GET_MODE (op1);
4923 else
4924 return 0;
4927 /* We can't simplify MODE_CC values since we don't know what the
4928 actual comparison is. */
4929 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4930 return 0;
4932 /* Make sure the constant is second. */
4933 if (swap_commutative_operands_p (op0, op1))
4935 std::swap (op0, op1);
4936 code = swap_condition (code);
4939 trueop0 = avoid_constant_pool_reference (op0);
4940 trueop1 = avoid_constant_pool_reference (op1);
4942 /* For integer comparisons of A and B maybe we can simplify A - B and can
4943 then simplify a comparison of that with zero. If A and B are both either
4944 a register or a CONST_INT, this can't help; testing for these cases will
4945 prevent infinite recursion here and speed things up.
4947 We can only do this for EQ and NE comparisons as otherwise we may
4948 lose or introduce overflow which we cannot disregard as undefined, since
4949 we do not know the signedness of the operation on either the left or
4950 the right hand side of the comparison. */
4952 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4953 && (code == EQ || code == NE)
4954 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4955 && (REG_P (op1) || CONST_INT_P (trueop1)))
4956 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4957 /* We cannot do this if tem is a nonzero address. */
4958 && ! nonzero_address_p (tem))
4959 return simplify_const_relational_operation (signed_condition (code),
4960 mode, tem, const0_rtx);
4962 if (! HONOR_NANS (mode) && code == ORDERED)
4963 return const_true_rtx;
4965 if (! HONOR_NANS (mode) && code == UNORDERED)
4966 return const0_rtx;
4968 /* For modes without NaNs, if the two operands are equal, we know the
4969 result except if they have side-effects. Even with NaNs we know
4970 the result of unordered comparisons and, if signaling NaNs are
4971 irrelevant, also the result of LT/GT/LTGT. */
4972 if ((! HONOR_NANS (trueop0)
4973 || code == UNEQ || code == UNLE || code == UNGE
4974 || ((code == LT || code == GT || code == LTGT)
4975 && ! HONOR_SNANS (trueop0)))
4976 && rtx_equal_p (trueop0, trueop1)
4977 && ! side_effects_p (trueop0))
4978 return comparison_result (code, CMP_EQ);
4980 /* If the operands are floating-point constants, see if we can fold
4981 the result. */
4982 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4983 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4984 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4986 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4987 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4989 /* Comparisons are unordered iff at least one of the values is NaN. */
4990 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4991 switch (code)
4993 case UNEQ:
4994 case UNLT:
4995 case UNGT:
4996 case UNLE:
4997 case UNGE:
4998 case NE:
4999 case UNORDERED:
5000 return const_true_rtx;
5001 case EQ:
5002 case LT:
5003 case GT:
5004 case LE:
5005 case GE:
5006 case LTGT:
5007 case ORDERED:
5008 return const0_rtx;
5009 default:
5010 return 0;
5013 return comparison_result (code,
5014 (real_equal (d0, d1) ? CMP_EQ :
5015 real_less (d0, d1) ? CMP_LT : CMP_GT));
5018 /* Otherwise, see if the operands are both integers. */
5019 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5020 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5022 /* It would be nice if we really had a mode here. However, the
5023 largest int representable on the target is as good as
5024 infinite. */
5025 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5026 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
5027 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
5029 if (wi::eq_p (ptrueop0, ptrueop1))
5030 return comparison_result (code, CMP_EQ);
5031 else
5033 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5034 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5035 return comparison_result (code, cr);
5039 /* Optimize comparisons with upper and lower bounds. */
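/* E.g. if nonzero_bits proves that TRUEOP0 fits in [0, 255], then
   (gtu TRUEOP0 (const_int 255)) folds to const0_rtx and
   (leu TRUEOP0 (const_int 255)) folds to const_true_rtx.  */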
5040 if (HWI_COMPUTABLE_MODE_P (mode)
5041 && CONST_INT_P (trueop1)
5042 && !side_effects_p (trueop0))
5044 int sign;
5045 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5046 HOST_WIDE_INT val = INTVAL (trueop1);
5047 HOST_WIDE_INT mmin, mmax;
5049 if (code == GEU
5050 || code == LEU
5051 || code == GTU
5052 || code == LTU)
5053 sign = 0;
5054 else
5055 sign = 1;
5057 /* Get a reduced range if the sign bit is zero. */
5058 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5060 mmin = 0;
5061 mmax = nonzero;
5063 else
5065 rtx mmin_rtx, mmax_rtx;
5066 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5068 mmin = INTVAL (mmin_rtx);
5069 mmax = INTVAL (mmax_rtx);
5070 if (sign)
5072 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5074 mmin >>= (sign_copies - 1);
5075 mmax >>= (sign_copies - 1);
5079 switch (code)
5081 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5082 case GEU:
5083 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5084 return const_true_rtx;
5085 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5086 return const0_rtx;
5087 break;
5088 case GE:
5089 if (val <= mmin)
5090 return const_true_rtx;
5091 if (val > mmax)
5092 return const0_rtx;
5093 break;
5095 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5096 case LEU:
5097 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5098 return const_true_rtx;
5099 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5100 return const0_rtx;
5101 break;
5102 case LE:
5103 if (val >= mmax)
5104 return const_true_rtx;
5105 if (val < mmin)
5106 return const0_rtx;
5107 break;
5109 case EQ:
5110 /* x == y is always false for y out of range. */
5111 if (val < mmin || val > mmax)
5112 return const0_rtx;
5113 break;
5115 /* x > y is always false for y >= mmax, always true for y < mmin. */
5116 case GTU:
5117 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5118 return const0_rtx;
5119 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5120 return const_true_rtx;
5121 break;
5122 case GT:
5123 if (val >= mmax)
5124 return const0_rtx;
5125 if (val < mmin)
5126 return const_true_rtx;
5127 break;
5129 /* x < y is always false for y <= mmin, always true for y > mmax. */
5130 case LTU:
5131 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5132 return const0_rtx;
5133 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5134 return const_true_rtx;
5135 break;
5136 case LT:
5137 if (val <= mmin)
5138 return const0_rtx;
5139 if (val > mmax)
5140 return const_true_rtx;
5141 break;
5143 case NE:
5144 /* x != y is always true for y out of range. */
5145 if (val < mmin || val > mmax)
5146 return const_true_rtx;
5147 break;
5149 default:
5150 break;
5154 /* Optimize integer comparisons with zero. */
5155 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5157 /* Some addresses are known to be nonzero. We don't know
5158 their sign, but equality comparisons are known. */
5159 if (nonzero_address_p (trueop0))
5161 if (code == EQ || code == LEU)
5162 return const0_rtx;
5163 if (code == NE || code == GTU)
5164 return const_true_rtx;
5167 /* See if the first operand is an IOR with a constant. If so, we
5168 may be able to determine the result of this comparison. */
5169 if (GET_CODE (op0) == IOR)
5171 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5172 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5174 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5175 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5176 && (UINTVAL (inner_const)
5177 & ((unsigned HOST_WIDE_INT) 1
5178 << sign_bitnum)));
5180 switch (code)
5182 case EQ:
5183 case LEU:
5184 return const0_rtx;
5185 case NE:
5186 case GTU:
5187 return const_true_rtx;
5188 case LT:
5189 case LE:
5190 if (has_sign)
5191 return const_true_rtx;
5192 break;
5193 case GT:
5194 case GE:
5195 if (has_sign)
5196 return const0_rtx;
5197 break;
5198 default:
5199 break;
5205 /* Optimize comparison of ABS with zero. */
5206 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5207 && (GET_CODE (trueop0) == ABS
5208 || (GET_CODE (trueop0) == FLOAT_EXTEND
5209 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5211 switch (code)
5213 case LT:
5214 /* Optimize abs(x) < 0.0. */
5215 if (!HONOR_SNANS (mode)
5216 && (!INTEGRAL_MODE_P (mode)
5217 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5219 if (INTEGRAL_MODE_P (mode)
5220 && (issue_strict_overflow_warning
5221 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5222 warning (OPT_Wstrict_overflow,
5223 ("assuming signed overflow does not occur when "
5224 "assuming abs (x) < 0 is false"));
5225 return const0_rtx;
5227 break;
5229 case GE:
5230 /* Optimize abs(x) >= 0.0. */
5231 if (!HONOR_NANS (mode)
5232 && (!INTEGRAL_MODE_P (mode)
5233 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5235 if (INTEGRAL_MODE_P (mode)
5236 && (issue_strict_overflow_warning
5237 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5238 warning (OPT_Wstrict_overflow,
5239 ("assuming signed overflow does not occur when "
5240 "assuming abs (x) >= 0 is true"));
5241 return const_true_rtx;
5243 break;
5245 case UNGE:
5246 /* Optimize ! (abs(x) < 0.0). */
5247 return const_true_rtx;
5249 default:
5250 break;
5254 return 0;
5257 /* Simplify CODE, an operation with result mode MODE and three operands,
5258 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5259 a constant. Return 0 if no simplification is possible. */
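/* E.g. (if_then_else (const_int 1) A B) simplifies to A and
   (if_then_else (const_int 0) A B) simplifies to B, while
   (fma (neg X) (neg Y) Z) drops both negations.  */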
5262 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5263 machine_mode op0_mode, rtx op0, rtx op1,
5264 rtx op2)
5266 unsigned int width = GET_MODE_PRECISION (mode);
5267 bool any_change = false;
5268 rtx tem, trueop2;
5270 /* VOIDmode means "infinite" precision. */
5271 if (width == 0)
5272 width = HOST_BITS_PER_WIDE_INT;
5274 switch (code)
5276 case FMA:
5277 /* Simplify negations around the multiplication. */
5278 /* -a * -b + c => a * b + c. */
5279 if (GET_CODE (op0) == NEG)
5281 tem = simplify_unary_operation (NEG, mode, op1, mode);
5282 if (tem)
5283 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5285 else if (GET_CODE (op1) == NEG)
5287 tem = simplify_unary_operation (NEG, mode, op0, mode);
5288 if (tem)
5289 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5292 /* Canonicalize the two multiplication operands. */
5293 /* a * -b + c => -b * a + c. */
5294 if (swap_commutative_operands_p (op0, op1))
5295 std::swap (op0, op1), any_change = true;
5297 if (any_change)
5298 return gen_rtx_FMA (mode, op0, op1, op2);
5299 return NULL_RTX;
5301 case SIGN_EXTRACT:
5302 case ZERO_EXTRACT:
5303 if (CONST_INT_P (op0)
5304 && CONST_INT_P (op1)
5305 && CONST_INT_P (op2)
5306 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5307 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5309 /* Extracting a bit-field from a constant. */
5310 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5311 HOST_WIDE_INT op1val = INTVAL (op1);
5312 HOST_WIDE_INT op2val = INTVAL (op2);
5313 if (BITS_BIG_ENDIAN)
5314 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5315 else
5316 val >>= op2val;
5318 if (HOST_BITS_PER_WIDE_INT != op1val)
5320 /* First zero-extend. */
5321 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5322 /* If desired, propagate sign bit. */
5323 if (code == SIGN_EXTRACT
5324 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5325 != 0)
5326 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5329 return gen_int_mode (val, mode);
5331 break;
5333 case IF_THEN_ELSE:
5334 if (CONST_INT_P (op0))
5335 return op0 != const0_rtx ? op1 : op2;
5337 /* Convert c ? a : a into "a". */
5338 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5339 return op1;
5341 /* Convert a != b ? a : b into "a". */
5342 if (GET_CODE (op0) == NE
5343 && ! side_effects_p (op0)
5344 && ! HONOR_NANS (mode)
5345 && ! HONOR_SIGNED_ZEROS (mode)
5346 && ((rtx_equal_p (XEXP (op0, 0), op1)
5347 && rtx_equal_p (XEXP (op0, 1), op2))
5348 || (rtx_equal_p (XEXP (op0, 0), op2)
5349 && rtx_equal_p (XEXP (op0, 1), op1))))
5350 return op1;
5352 /* Convert a == b ? a : b into "b". */
5353 if (GET_CODE (op0) == EQ
5354 && ! side_effects_p (op0)
5355 && ! HONOR_NANS (mode)
5356 && ! HONOR_SIGNED_ZEROS (mode)
5357 && ((rtx_equal_p (XEXP (op0, 0), op1)
5358 && rtx_equal_p (XEXP (op0, 1), op2))
5359 || (rtx_equal_p (XEXP (op0, 0), op2)
5360 && rtx_equal_p (XEXP (op0, 1), op1))))
5361 return op2;
5363 /* Convert (!c) != {0,...,0} ? a : b into
5364 c != {0,...,0} ? b : a for vector modes. */
5365 if (VECTOR_MODE_P (GET_MODE (op1))
5366 && GET_CODE (op0) == NE
5367 && GET_CODE (XEXP (op0, 0)) == NOT
5368 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5370 rtx cv = XEXP (op0, 1);
5371 int nunits = CONST_VECTOR_NUNITS (cv);
5372 bool ok = true;
5373 for (int i = 0; i < nunits; ++i)
5374 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5376 ok = false;
5377 break;
5379 if (ok)
5381 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5382 XEXP (XEXP (op0, 0), 0),
5383 XEXP (op0, 1));
5384 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5385 return retval;
5389 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5391 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5392 ? GET_MODE (XEXP (op0, 1))
5393 : GET_MODE (XEXP (op0, 0)));
5394 rtx temp;
5396 /* Look for happy constants in op1 and op2. */
5397 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5399 HOST_WIDE_INT t = INTVAL (op1);
5400 HOST_WIDE_INT f = INTVAL (op2);
5402 if (t == STORE_FLAG_VALUE && f == 0)
5403 code = GET_CODE (op0);
5404 else if (t == 0 && f == STORE_FLAG_VALUE)
5406 enum rtx_code tmp;
5407 tmp = reversed_comparison_code (op0, NULL_RTX);
5408 if (tmp == UNKNOWN)
5409 break;
5410 code = tmp;
5412 else
5413 break;
5415 return simplify_gen_relational (code, mode, cmp_mode,
5416 XEXP (op0, 0), XEXP (op0, 1));
5419 if (cmp_mode == VOIDmode)
5420 cmp_mode = op0_mode;
5421 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5422 cmp_mode, XEXP (op0, 0),
5423 XEXP (op0, 1));
5425 /* See if any simplifications were possible. */
5426 if (temp)
5428 if (CONST_INT_P (temp))
5429 return temp == const0_rtx ? op2 : op1;
5430 else if (temp)
5431 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5434 break;
5436 case VEC_MERGE:
5437 gcc_assert (GET_MODE (op0) == mode);
5438 gcc_assert (GET_MODE (op1) == mode);
5439 gcc_assert (VECTOR_MODE_P (mode));
5440 trueop2 = avoid_constant_pool_reference (op2);
5441 if (CONST_INT_P (trueop2))
5443 int elt_size = GET_MODE_UNIT_SIZE (mode);
5444 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5445 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5446 unsigned HOST_WIDE_INT mask;
5447 if (n_elts == HOST_BITS_PER_WIDE_INT)
5448 mask = -1;
5449 else
5450 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5452 if (!(sel & mask) && !side_effects_p (op0))
5453 return op1;
5454 if ((sel & mask) == mask && !side_effects_p (op1))
5455 return op0;
5457 rtx trueop0 = avoid_constant_pool_reference (op0);
5458 rtx trueop1 = avoid_constant_pool_reference (op1);
5459 if (GET_CODE (trueop0) == CONST_VECTOR
5460 && GET_CODE (trueop1) == CONST_VECTOR)
5462 rtvec v = rtvec_alloc (n_elts);
5463 unsigned int i;
5465 for (i = 0; i < n_elts; i++)
5466 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5467 ? CONST_VECTOR_ELT (trueop0, i)
5468 : CONST_VECTOR_ELT (trueop1, i));
5469 return gen_rtx_CONST_VECTOR (mode, v);
5472 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5473 if no element from a appears in the result. */
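/* E.g. for two-element vectors, (vec_merge (vec_merge A B (const_int 1))
   C (const_int 2)) takes lane 1 from the inner merge (which is B's lane)
   and lane 0 from C, so no element of A survives and the expression
   becomes (vec_merge B C (const_int 2)).  */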
5474 if (GET_CODE (op0) == VEC_MERGE)
5476 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5477 if (CONST_INT_P (tem))
5479 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5480 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5481 return simplify_gen_ternary (code, mode, mode,
5482 XEXP (op0, 1), op1, op2);
5483 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5484 return simplify_gen_ternary (code, mode, mode,
5485 XEXP (op0, 0), op1, op2);
5488 if (GET_CODE (op1) == VEC_MERGE)
5490 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5491 if (CONST_INT_P (tem))
5493 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5494 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5495 return simplify_gen_ternary (code, mode, mode,
5496 op0, XEXP (op1, 1), op2);
5497 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5498 return simplify_gen_ternary (code, mode, mode,
5499 op0, XEXP (op1, 0), op2);
5503 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5504 with a. */
5505 if (GET_CODE (op0) == VEC_DUPLICATE
5506 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5507 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5508 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5510 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5511 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5513 if (XEXP (XEXP (op0, 0), 0) == op1
5514 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5515 return op1;
5520 if (rtx_equal_p (op0, op1)
5521 && !side_effects_p (op2) && !side_effects_p (op1))
5522 return op0;
5524 break;
5526 default:
5527 gcc_unreachable ();
5530 return 0;
5533 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5534 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5535 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5537 Works by unpacking OP into a collection of 8-bit values
5538 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5539 and then repacking them again for OUTERMODE. */
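/* For example, a SImode SUBREG of a DFmode CONST_DOUBLE is handled by
   expanding the eight bytes of the double's target representation and
   repacking the four bytes selected by BYTE as a CONST_INT.  */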
5541 static rtx
5542 simplify_immed_subreg (machine_mode outermode, rtx op,
5543 machine_mode innermode, unsigned int byte)
5545 enum {
5546 value_bit = 8,
5547 value_mask = (1 << value_bit) - 1
5549 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5550 int value_start;
5551 int i;
5552 int elem;
5554 int num_elem;
5555 rtx * elems;
5556 int elem_bitsize;
5557 rtx result_s;
5558 rtvec result_v = NULL;
5559 enum mode_class outer_class;
5560 machine_mode outer_submode;
5561 int max_bitsize;
5563 /* Some ports misuse CCmode. */
5564 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5565 return op;
5567 /* We have no way to represent a complex constant at the rtl level. */
5568 if (COMPLEX_MODE_P (outermode))
5569 return NULL_RTX;
5571 /* We support any size mode. */
5572 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5573 GET_MODE_BITSIZE (innermode));
5575 /* Unpack the value. */
5577 if (GET_CODE (op) == CONST_VECTOR)
5579 num_elem = CONST_VECTOR_NUNITS (op);
5580 elems = &CONST_VECTOR_ELT (op, 0);
5581 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5583 else
5585 num_elem = 1;
5586 elems = &op;
5587 elem_bitsize = max_bitsize;
5589 /* If this asserts, it is too complicated; reducing value_bit may help. */
5590 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5591 /* I don't know how to handle endianness of sub-units. */
5592 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5594 for (elem = 0; elem < num_elem; elem++)
5596 unsigned char * vp;
5597 rtx el = elems[elem];
5599 /* Vectors are kept in target memory order. (This is probably
5600 a mistake.) */
5602 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5603 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5604 / BITS_PER_UNIT);
5605 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5606 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5607 unsigned bytele = (subword_byte % UNITS_PER_WORD
5608 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5609 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5612 switch (GET_CODE (el))
5614 case CONST_INT:
5615 for (i = 0;
5616 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5617 i += value_bit)
5618 *vp++ = INTVAL (el) >> i;
5619 /* CONST_INTs are always logically sign-extended. */
5620 for (; i < elem_bitsize; i += value_bit)
5621 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5622 break;
5624 case CONST_WIDE_INT:
5626 rtx_mode_t val = std::make_pair (el, innermode);
5627 unsigned char extend = wi::sign_mask (val);
5629 for (i = 0; i < elem_bitsize; i += value_bit)
5630 *vp++ = wi::extract_uhwi (val, i, value_bit);
5631 for (; i < elem_bitsize; i += value_bit)
5632 *vp++ = extend;
5634 break;
5636 case CONST_DOUBLE:
5637 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5639 unsigned char extend = 0;
5640 /* If this triggers, someone should have generated a
5641 CONST_INT instead. */
5642 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5644 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5645 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5646 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5648 *vp++
5649 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5650 i += value_bit;
5653 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5654 extend = -1;
5655 for (; i < elem_bitsize; i += value_bit)
5656 *vp++ = extend;
5658 else
5660 /* This is big enough for anything on the platform. */
5661 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5662 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5664 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5665 gcc_assert (bitsize <= elem_bitsize);
5666 gcc_assert (bitsize % value_bit == 0);
5668 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5669 GET_MODE (el));
5671 /* real_to_target produces its result in words affected by
5672 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5673 and use WORDS_BIG_ENDIAN instead; see the documentation
5674 of SUBREG in rtl.texi. */
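/* As an illustration (assuming the target uses the IEEE single format
   for SFmode): for an SFmode value of 1.0, real_to_target puts
   0x3f800000 in tmp[0], and the loop below copies those 32 bits into
   VALUE in value_bit-sized chunks. */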
5675 for (i = 0; i < bitsize; i += value_bit)
5677 int ibase;
5678 if (WORDS_BIG_ENDIAN)
5679 ibase = bitsize - 1 - i;
5680 else
5681 ibase = i;
5682 *vp++ = tmp[ibase / 32] >> i % 32;
5685 /* It shouldn't matter what's done here, so fill it with
5686 zero. */
5687 for (; i < elem_bitsize; i += value_bit)
5688 *vp++ = 0;
5690 break;
5692 case CONST_FIXED:
5693 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5695 for (i = 0; i < elem_bitsize; i += value_bit)
5696 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5698 else
5700 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5701 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5702 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5703 i += value_bit)
5704 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5705 >> (i - HOST_BITS_PER_WIDE_INT);
5706 for (; i < elem_bitsize; i += value_bit)
5707 *vp++ = 0;
5709 break;
5711 default:
5712 gcc_unreachable ();
5716 /* Now, pick the right byte to start with. */
5717 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5718 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5719 will already have offset 0. */
5720 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5722 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5723 - byte);
5724 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5725 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5726 byte = (subword_byte % UNITS_PER_WORD
5727 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
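/* For example, on a hypothetical big-endian target with
   UNITS_PER_WORD == 4, a (subreg:SI ... 4) of a DImode constant has
   innermode size 8 and outermode size 4, so ibyte == 0 and the
   memory-order offset 4 is renumbered to byte 0, i.e. the least
   significant word. */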
5730 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5731 so if it's become negative it will instead be very large.) */
5732 gcc_assert (byte < GET_MODE_SIZE (innermode));
5734 /* Convert from bytes to chunks of size value_bit. */
5735 value_start = byte * (BITS_PER_UNIT / value_bit);
5737 /* Re-pack the value. */
5738 num_elem = GET_MODE_NUNITS (outermode);
5740 if (VECTOR_MODE_P (outermode))
5742 result_v = rtvec_alloc (num_elem);
5743 elems = &RTVEC_ELT (result_v, 0);
5745 else
5746 elems = &result_s;
5748 outer_submode = GET_MODE_INNER (outermode);
5749 outer_class = GET_MODE_CLASS (outer_submode);
5750 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5752 gcc_assert (elem_bitsize % value_bit == 0);
5753 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
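/* For instance, repacking a DImode source as V2SImode (a hypothetical
   combination) gives num_elem == 2, outer_submode == SImode and
   elem_bitsize == 32, so each output element is rebuilt from its own
   32-bit slice of VALUE, located via the same endianness mapping used
   when unpacking. */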
5755 for (elem = 0; elem < num_elem; elem++)
5757 unsigned char *vp;
5759 /* Vectors are stored in target memory order. (This is probably
5760 a mistake.) */
5762 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5763 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5764 / BITS_PER_UNIT);
5765 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5766 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5767 unsigned bytele = (subword_byte % UNITS_PER_WORD
5768 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5769 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5772 switch (outer_class)
5774 case MODE_INT:
5775 case MODE_PARTIAL_INT:
5777 int u;
5778 int base = 0;
5779 int units
5780 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5781 / HOST_BITS_PER_WIDE_INT;
5782 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5783 wide_int r;
5785 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5786 return NULL_RTX;
5787 for (u = 0; u < units; u++)
5789 unsigned HOST_WIDE_INT buf = 0;
5790 for (i = 0;
5791 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5792 i += value_bit)
5793 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5795 tmp[u] = buf;
5796 base += HOST_BITS_PER_WIDE_INT;
5798 r = wide_int::from_array (tmp, units,
5799 GET_MODE_PRECISION (outer_submode));
5800 #if TARGET_SUPPORTS_WIDE_INT == 0
5801 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5802 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5803 return NULL_RTX;
5804 #endif
5805 elems[elem] = immed_wide_int_const (r, outer_submode);
5807 break;
5809 case MODE_FLOAT:
5810 case MODE_DECIMAL_FLOAT:
5812 REAL_VALUE_TYPE r;
5813 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5815 /* real_from_target wants its input in words affected by
5816 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5817 and use WORDS_BIG_ENDIAN instead; see the documentation
5818 of SUBREG in rtl.texi. */
5819 for (i = 0; i < max_bitsize / 32; i++)
5820 tmp[i] = 0;
5821 for (i = 0; i < elem_bitsize; i += value_bit)
5823 int ibase;
5824 if (WORDS_BIG_ENDIAN)
5825 ibase = elem_bitsize - 1 - i;
5826 else
5827 ibase = i;
5828 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5831 real_from_target (&r, tmp, outer_submode);
5832 elems[elem] = const_double_from_real_value (r, outer_submode);
5834 break;
5836 case MODE_FRACT:
5837 case MODE_UFRACT:
5838 case MODE_ACCUM:
5839 case MODE_UACCUM:
5841 FIXED_VALUE_TYPE f;
5842 f.data.low = 0;
5843 f.data.high = 0;
5844 f.mode = outer_submode;
5846 for (i = 0;
5847 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5848 i += value_bit)
5849 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5850 for (; i < elem_bitsize; i += value_bit)
5851 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5852 << (i - HOST_BITS_PER_WIDE_INT));
5854 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5856 break;
5858 default:
5859 gcc_unreachable ();
5862 if (VECTOR_MODE_P (outermode))
5863 return gen_rtx_CONST_VECTOR (outermode, result_v);
5864 else
5865 return result_s;
5868 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5869 Return 0 if no simplifications are possible. */
5870 rtx
5871 simplify_subreg (machine_mode outermode, rtx op,
5872 machine_mode innermode, unsigned int byte)
5874 /* Little bit of sanity checking. */
5875 gcc_assert (innermode != VOIDmode);
5876 gcc_assert (outermode != VOIDmode);
5877 gcc_assert (innermode != BLKmode);
5878 gcc_assert (outermode != BLKmode);
5880 gcc_assert (GET_MODE (op) == innermode
5881 || GET_MODE (op) == VOIDmode);
5883 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5884 return NULL_RTX;
5886 if (byte >= GET_MODE_SIZE (innermode))
5887 return NULL_RTX;
5889 if (outermode == innermode && !byte)
5890 return op;
5892 if (CONST_SCALAR_INT_P (op)
5893 || CONST_DOUBLE_AS_FLOAT_P (op)
5894 || GET_CODE (op) == CONST_FIXED
5895 || GET_CODE (op) == CONST_VECTOR)
5896 return simplify_immed_subreg (outermode, op, innermode, byte);
5898 /* Changing mode twice with SUBREG => just change it once,
5899 or not at all if changing back to the starting mode. */
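/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0), and (subreg:SI (subreg:SI (reg:SI R) 0) 0)
   becomes (reg:SI R) outright. (Offsets shown assume a little-endian
   target; R stands for a hypothetical pseudo register.) */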
5900 if (GET_CODE (op) == SUBREG)
5902 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5903 int final_offset = byte + SUBREG_BYTE (op);
5904 rtx newx;
5906 if (outermode == innermostmode
5907 && byte == 0 && SUBREG_BYTE (op) == 0)
5908 return SUBREG_REG (op);
5910 /* The SUBREG_BYTE represents the offset, as if the value were stored
5911 in memory. An irritating exception is the paradoxical subreg, where
5912 we define SUBREG_BYTE to be 0; on big-endian machines this value
5913 would otherwise be negative. For a moment, undo this exception. */
5914 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5916 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5917 if (WORDS_BIG_ENDIAN)
5918 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5919 if (BYTES_BIG_ENDIAN)
5920 final_offset += difference % UNITS_PER_WORD;
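/* A worked instance of the adjustment just made (hypothetical 32-bit
   big-endian target, UNITS_PER_WORD == 4): for a paradoxical
   (subreg:DI (subreg:SI ...) 0), difference == 4 - 8 == -4, so
   WORDS_BIG_ENDIAN adds -4 and BYTES_BIG_ENDIAN adds 0, making the
   offset negative as described above. */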
5922 if (SUBREG_BYTE (op) == 0
5923 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5925 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5926 if (WORDS_BIG_ENDIAN)
5927 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5928 if (BYTES_BIG_ENDIAN)
5929 final_offset += difference % UNITS_PER_WORD;
5932 /* See whether resulting subreg will be paradoxical. */
5933 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5935 /* In nonparadoxical subregs we can't handle negative offsets. */
5936 if (final_offset < 0)
5937 return NULL_RTX;
5938 /* Bail out in case resulting subreg would be incorrect. */
5939 if (final_offset % GET_MODE_SIZE (outermode)
5940 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5941 return NULL_RTX;
5943 else
5945 int offset = 0;
5946 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5948 /* For a paradoxical subreg, see whether we are still looking at the
5949 lower part. If so, our SUBREG_BYTE will be 0. */
5950 if (WORDS_BIG_ENDIAN)
5951 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5952 if (BYTES_BIG_ENDIAN)
5953 offset += difference % UNITS_PER_WORD;
5954 if (offset == final_offset)
5955 final_offset = 0;
5956 else
5957 return NULL_RTX;
5960 /* Recurse for further possible simplifications. */
5961 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5962 final_offset);
5963 if (newx)
5964 return newx;
5965 if (validate_subreg (outermode, innermostmode,
5966 SUBREG_REG (op), final_offset))
5968 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5969 if (SUBREG_PROMOTED_VAR_P (op)
5970 && SUBREG_PROMOTED_SIGN (op) >= 0
5971 && GET_MODE_CLASS (outermode) == MODE_INT
5972 && IN_RANGE (GET_MODE_SIZE (outermode),
5973 GET_MODE_SIZE (innermode),
5974 GET_MODE_SIZE (innermostmode))
5975 && subreg_lowpart_p (newx))
5977 SUBREG_PROMOTED_VAR_P (newx) = 1;
5978 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5980 return newx;
5982 return NULL_RTX;
5985 /* SUBREG of a hard register => just change the register number
5986 and/or mode. If the hard register is not valid in that mode,
5987 suppress this simplification. If the hard register is the stack,
5988 frame, or argument pointer, leave this as a SUBREG. */
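/* For instance, on a hypothetical target where a DImode value in hard
   register 2 occupies registers 2 and 3, (subreg:SI (reg:DI 2) 4) on a
   little-endian machine can become (reg:SI 3) directly, provided SImode
   is valid in register 3. */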
5990 if (REG_P (op) && HARD_REGISTER_P (op))
5992 unsigned int regno, final_regno;
5994 regno = REGNO (op);
5995 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5996 if (HARD_REGISTER_NUM_P (final_regno))
5998 rtx x;
5999 int final_offset = byte;
6001 /* Adjust offset for paradoxical subregs. */
6002 if (byte == 0
6003 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6005 int difference = (GET_MODE_SIZE (innermode)
6006 - GET_MODE_SIZE (outermode));
6007 if (WORDS_BIG_ENDIAN)
6008 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6009 if (BYTES_BIG_ENDIAN)
6010 final_offset += difference % UNITS_PER_WORD;
6013 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6015 /* Propagate the original regno. We don't have any way to specify
6016 the offset inside the original regno, so do so only for the lowpart.
6017 The information is used only by alias analysis, which cannot
6018 grok partial registers anyway. */
6020 if (subreg_lowpart_offset (outermode, innermode) == byte)
6021 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6022 return x;
6026 /* If we have a SUBREG of a register that we are replacing and we are
6027 replacing it with a MEM, make a new MEM and try replacing the
6028 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6029 or if we would be widening it. */
6031 if (MEM_P (op)
6032 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6033 /* Allow splitting of volatile memory references in case we don't
6034 have an instruction to move the whole thing. */
6035 && (! MEM_VOLATILE_P (op)
6036 || ! have_insn_for (SET, innermode))
6037 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6038 return adjust_address_nv (op, outermode, byte);
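/* The MEM case above, for example: (subreg:QI (mem:SI ADDR) 0) can be
   rewritten as (mem:QI ADDR), with adjust_address_nv applying the byte
   offset (0 here) to the address. (ADDR stands for an arbitrary valid
   address.) */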
6040 /* Handle complex values represented as CONCAT
6041 of real and imaginary part. */
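/* E.g. with a CONCAT representing an SCmode complex value,
   (subreg:SF (concat:SC (reg:SF A) (reg:SF B)) 4) selects the imaginary
   part and simplifies to B, since the part size of SFmode is 4 bytes.
   (A and B stand for hypothetical pseudo registers.) */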
6042 if (GET_CODE (op) == CONCAT)
6044 unsigned int part_size, final_offset;
6045 rtx part, res;
6047 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6048 if (byte < part_size)
6050 part = XEXP (op, 0);
6051 final_offset = byte;
6053 else
6055 part = XEXP (op, 1);
6056 final_offset = byte - part_size;
6059 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6060 return NULL_RTX;
6062 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6063 if (res)
6064 return res;
6065 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6066 return gen_rtx_SUBREG (outermode, part, final_offset);
6067 return NULL_RTX;
6070 /* A SUBREG resulting from a zero extension may fold to zero if
6071 it extracts higher bits than the ZERO_EXTEND's source bits. */
6072 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6074 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6075 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6076 return CONST0_RTX (outermode);
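/* For instance, the fold above turns
   (subreg:SI (zero_extend:DI (reg:HI X)) 4) into (const_int 0) on a
   little-endian target: the subreg reads bits 32..63 of the extended
   value, all of which lie above the 16-bit source. (X stands for a
   hypothetical pseudo register.) */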
6079 if (SCALAR_INT_MODE_P (outermode)
6080 && SCALAR_INT_MODE_P (innermode)
6081 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6082 && byte == subreg_lowpart_offset (outermode, innermode))
6084 rtx tem = simplify_truncation (outermode, op, innermode);
6085 if (tem)
6086 return tem;
6089 return NULL_RTX;
6092 /* Make a SUBREG operation or equivalent if it folds. */
6094 rtx
6095 simplify_gen_subreg (machine_mode outermode, rtx op,
6096 machine_mode innermode, unsigned int byte)
6098 rtx newx;
6100 newx = simplify_subreg (outermode, op, innermode, byte);
6101 if (newx)
6102 return newx;
6104 if (GET_CODE (op) == SUBREG
6105 || GET_CODE (op) == CONCAT
6106 || GET_MODE (op) == VOIDmode)
6107 return NULL_RTX;
6109 if (validate_subreg (outermode, innermode, op, byte))
6110 return gen_rtx_SUBREG (outermode, op, byte);
6112 return NULL_RTX;
6115 /* Generate a subreg to get the least significant part of EXPR (in mode
6116 INNER_MODE) in mode OUTER_MODE. */
6118 rtx
6119 lowpart_subreg (machine_mode outer_mode, rtx expr,
6120 machine_mode inner_mode)
6122 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6123 subreg_lowpart_offset (outer_mode, inner_mode));
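/* For example, lowpart_subreg (QImode, X, SImode) uses byte offset 0 on
   a little-endian target and offset 3 on a big-endian one, since
   subreg_lowpart_offset picks whichever byte offset holds the least
   significant part. (X stands for any SImode rtx.) */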
6126 /* Simplify X, an rtx expression.
6128 Return the simplified expression or NULL if no simplifications
6129 were possible.
6131 This is the preferred entry point into the simplification routines;
6132 however, we still allow passes to call the more specific routines.
6134 Right now GCC has three (yes, three) major bodies of RTL simplification
6135 code that need to be unified.
6137 1. fold_rtx in cse.c. This code uses various CSE specific
6138 information to aid in RTL simplification.
6140 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6141 it uses combine specific information to aid in RTL
6142 simplification.
6144 3. The routines in this file.
6147 Long term we want to only have one body of simplification code; to
6148 get to that state I recommend the following steps:
6150 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6151 which are not pass dependent state into these routines.
6153 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6154 use this routine whenever possible.
6156 3. Allow for pass dependent state to be provided to these
6157 routines and add simplifications based on the pass dependent
6158 state. Remove code from cse.c & combine.c that becomes
6159 redundant/dead.
6161 It will take time, but ultimately the compiler will be easier to
6162 maintain and improve. It's totally silly that when we add a
6163 simplification it needs to be added to 4 places (3 for RTL
6164 simplification and 1 for tree simplification). */
6166 rtx
6167 simplify_rtx (const_rtx x)
6169 const enum rtx_code code = GET_CODE (x);
6170 const machine_mode mode = GET_MODE (x);
6172 switch (GET_RTX_CLASS (code))
6174 case RTX_UNARY:
6175 return simplify_unary_operation (code, mode,
6176 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6177 case RTX_COMM_ARITH:
6178 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6179 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6181 /* Fall through.... */
6183 case RTX_BIN_ARITH:
6184 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6186 case RTX_TERNARY:
6187 case RTX_BITFIELD_OPS:
6188 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6189 XEXP (x, 0), XEXP (x, 1),
6190 XEXP (x, 2));
6192 case RTX_COMPARE:
6193 case RTX_COMM_COMPARE:
6194 return simplify_relational_operation (code, mode,
6195 ((GET_MODE (XEXP (x, 0))
6196 != VOIDmode)
6197 ? GET_MODE (XEXP (x, 0))
6198 : GET_MODE (XEXP (x, 1))),
6199 XEXP (x, 0),
6200 XEXP (x, 1));
6202 case RTX_EXTRA:
6203 if (code == SUBREG)
6204 return simplify_subreg (mode, SUBREG_REG (x),
6205 GET_MODE (SUBREG_REG (x)),
6206 SUBREG_BYTE (x));
6207 break;
6209 case RTX_OBJ:
6210 if (code == LO_SUM)
6212 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6213 if (GET_CODE (XEXP (x, 0)) == HIGH
6214 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6215 return XEXP (x, 1);
6217 break;
6219 default:
6220 break;
6222 return NULL;