gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
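/* For example, HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -5) is
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND (7) is 0, i.e. the HIGH word
   that a full sign extension of LOW would produce.  */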
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
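/* For example, negating the SImode constant -2147483648 (0x80000000)
   overflows the signed range; the unsigned negation followed by
   gen_int_mode truncates the result back into SImode, so the same
   CONST_INT is returned rather than an out-of-range value.  */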
66 /* Test whether expression, X, is an immediate constant that represents
67 the most significant bit of machine mode MODE. */
69 bool
70 mode_signbit_p (machine_mode mode, const_rtx x)
72 unsigned HOST_WIDE_INT val;
73 unsigned int width;
75 if (GET_MODE_CLASS (mode) != MODE_INT)
76 return false;
78 width = GET_MODE_PRECISION (mode);
79 if (width == 0)
80 return false;
82 if (width <= HOST_BITS_PER_WIDE_INT
83 && CONST_INT_P (x))
84 val = INTVAL (x);
85 #if TARGET_SUPPORTS_WIDE_INT
86 else if (CONST_WIDE_INT_P (x))
88 unsigned int i;
89 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
90 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
91 return false;
92 for (i = 0; i < elts - 1; i++)
93 if (CONST_WIDE_INT_ELT (x, i) != 0)
94 return false;
95 val = CONST_WIDE_INT_ELT (x, elts - 1);
96 width %= HOST_BITS_PER_WIDE_INT;
97 if (width == 0)
98 width = HOST_BITS_PER_WIDE_INT;
100 #else
101 else if (width <= HOST_BITS_PER_DOUBLE_INT
102 && CONST_DOUBLE_AS_INT_P (x)
103 && CONST_DOUBLE_LOW (x) == 0)
105 val = CONST_DOUBLE_HIGH (x);
106 width -= HOST_BITS_PER_WIDE_INT;
108 #endif
109 else
110 /* X is not an integer constant. */
111 return false;
113 if (width < HOST_BITS_PER_WIDE_INT)
114 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
115 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
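/* For example, in SImode only the value 0x80000000 satisfies
   mode_signbit_p, and in DImode only the constant with just bit 63 set,
   whichever of CONST_INT, CONST_WIDE_INT or CONST_DOUBLE represents it.  */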
118 /* Test whether VAL is equal to the most significant bit of mode MODE
119 (after masking with the mode mask of MODE). Returns false if the
120 precision of MODE is too large to handle. */
122 bool
123 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
125 unsigned int width;
127 if (GET_MODE_CLASS (mode) != MODE_INT)
128 return false;
130 width = GET_MODE_PRECISION (mode);
131 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
132 return false;
134 val &= GET_MODE_MASK (mode);
135 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
138 /* Test whether the most significant bit of mode MODE is set in VAL.
139 Returns false if the precision of MODE is too large to handle. */
140 bool
141 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
143 unsigned int width;
145 if (GET_MODE_CLASS (mode) != MODE_INT)
146 return false;
148 width = GET_MODE_PRECISION (mode);
149 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
150 return false;
152 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
153 return val != 0;
156 /* Test whether the most significant bit of mode MODE is clear in VAL.
157 Returns false if the precision of MODE is too large to handle. */
158 bool
159 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
161 unsigned int width;
163 if (GET_MODE_CLASS (mode) != MODE_INT)
164 return false;
166 width = GET_MODE_PRECISION (mode);
167 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
168 return false;
170 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
171 return val == 0;
174 /* Make a binary operation by properly ordering the operands and
175 seeing if the expression folds. */
177 rtx
178 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
179 rtx op1)
181 rtx tem;
183 /* If this simplifies, do it. */
184 tem = simplify_binary_operation (code, mode, op0, op1);
185 if (tem)
186 return tem;
188 /* Put complex operands first and constants second if commutative. */
189 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
190 && swap_commutative_operands_p (op0, op1))
191 std::swap (op0, op1);
193 return gen_rtx_fmt_ee (code, mode, op0, op1);
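/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) is
   expected to fold to x, and two CONST_INT operands fold to a single
   CONST_INT; if nothing folds, a commutative operation merely has its
   operands put into canonical order before the new rtx is built.  */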
196 /* If X is a MEM referencing the constant pool, return the real value.
197 Otherwise return X. */
198 rtx
199 avoid_constant_pool_reference (rtx x)
201 rtx c, tmp, addr;
202 machine_mode cmode;
203 HOST_WIDE_INT offset = 0;
205 switch (GET_CODE (x))
207 case MEM:
208 break;
210 case FLOAT_EXTEND:
211 /* Handle float extensions of constant pool references. */
212 tmp = XEXP (x, 0);
213 c = avoid_constant_pool_reference (tmp);
214 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
215 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
216 GET_MODE (x));
217 return x;
219 default:
220 return x;
223 if (GET_MODE (x) == BLKmode)
224 return x;
226 addr = XEXP (x, 0);
228 /* Call target hook to avoid the effects of -fpic etc.... */
229 addr = targetm.delegitimize_address (addr);
231 /* Split the address into a base and integer offset. */
232 if (GET_CODE (addr) == CONST
233 && GET_CODE (XEXP (addr, 0)) == PLUS
234 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
236 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
237 addr = XEXP (XEXP (addr, 0), 0);
240 if (GET_CODE (addr) == LO_SUM)
241 addr = XEXP (addr, 1);
243 /* If this is a constant pool reference, we can turn it into its
244 constant and hope that simplifications happen. */
245 if (GET_CODE (addr) == SYMBOL_REF
246 && CONSTANT_POOL_ADDRESS_P (addr))
248 c = get_pool_constant (addr);
249 cmode = get_pool_mode (addr);
251 /* If we're accessing the constant in a different mode than it was
252 originally stored, attempt to fix that up via subreg simplifications.
253 If that fails we have no choice but to return the original memory. */
254 if ((offset != 0 || cmode != GET_MODE (x))
255 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
257 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
258 if (tem && CONSTANT_P (tem))
259 return tem;
261 else
262 return c;
265 return x;
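/* For example, a (mem:DF (symbol_ref ...)) that addresses a constant
   pool entry holding 1.0 is replaced by the CONST_DOUBLE for 1.0,
   giving later simplifications a chance to fold with it.  */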
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
272 rtx
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep, false);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && TREE_CODE (decl) == VAR_DECL
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do so if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
372 rtx
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
387 rtx
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies mode comparison is done in. */
404 rtx
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419 X, if it returns non-NULL, replace X with its return value and simplify the
420 result. */
422 rtx
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
576 rtx
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
676 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
677 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
678 the outer subreg is effectively a truncation to the original mode. */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
694 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
695 the outer subreg is effectively a truncation to the original mode. */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
706 into (ashift:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Recognize a word extraction from a multi-word subreg. */
718 if ((GET_CODE (op) == LSHIFTRT
719 || GET_CODE (op) == ASHIFTRT)
720 && SCALAR_INT_MODE_P (mode)
721 && SCALAR_INT_MODE_P (op_mode)
722 && precision >= BITS_PER_WORD
723 && 2 * precision <= op_precision
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
726 && UINTVAL (XEXP (op, 1)) < op_precision)
728 int byte = subreg_lowpart_offset (mode, op_mode);
729 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
730 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
731 (WORDS_BIG_ENDIAN
732 ? byte - shifted_bytes
733 : byte + shifted_bytes));
736 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
737 and try replacing the TRUNCATE and shift with it. Don't do this
738 if the MEM has a mode-dependent address. */
739 if ((GET_CODE (op) == LSHIFTRT
740 || GET_CODE (op) == ASHIFTRT)
741 && SCALAR_INT_MODE_P (op_mode)
742 && MEM_P (XEXP (op, 0))
743 && CONST_INT_P (XEXP (op, 1))
744 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
745 && INTVAL (XEXP (op, 1)) > 0
746 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
747 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
748 MEM_ADDR_SPACE (XEXP (op, 0)))
749 && ! MEM_VOLATILE_P (XEXP (op, 0))
750 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
751 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
753 int byte = subreg_lowpart_offset (mode, op_mode);
754 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
755 return adjust_address_nv (XEXP (op, 0), mode,
756 (WORDS_BIG_ENDIAN
757 ? byte - shifted_bytes
758 : byte + shifted_bytes));
761 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
762 (OP:SI foo:SI) if OP is NEG or ABS. */
763 if ((GET_CODE (op) == ABS
764 || GET_CODE (op) == NEG)
765 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
766 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
767 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
768 return simplify_gen_unary (GET_CODE (op), mode,
769 XEXP (XEXP (op, 0), 0), mode);
771 /* (truncate:A (subreg:B (truncate:C X) 0)) is
772 (truncate:A X). */
773 if (GET_CODE (op) == SUBREG
774 && SCALAR_INT_MODE_P (mode)
775 && SCALAR_INT_MODE_P (op_mode)
776 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
777 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
778 && subreg_lowpart_p (op))
780 rtx inner = XEXP (SUBREG_REG (op), 0);
781 if (GET_MODE_PRECISION (mode)
782 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
783 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
784 else
785 /* If subreg above is paradoxical and C is narrower
786 than A, return (subreg:A (truncate:C X) 0). */
787 return simplify_gen_subreg (mode, SUBREG_REG (op),
788 GET_MODE (SUBREG_REG (op)), 0);
791 /* (truncate:A (truncate:B X)) is (truncate:A X). */
792 if (GET_CODE (op) == TRUNCATE)
793 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
794 GET_MODE (XEXP (op, 0)));
796 return NULL_RTX;
799 /* Try to simplify a unary operation CODE whose output mode is to be
800 MODE with input operand OP whose mode was originally OP_MODE.
801 Return zero if no simplification can be made. */
802 rtx
803 simplify_unary_operation (enum rtx_code code, machine_mode mode,
804 rtx op, machine_mode op_mode)
806 rtx trueop, tem;
808 trueop = avoid_constant_pool_reference (op);
810 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
811 if (tem)
812 return tem;
814 return simplify_unary_operation_1 (code, mode, op);
817 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
818 to be exact. */
820 static bool
821 exact_int_to_float_conversion_p (const_rtx op)
823 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
824 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
825 /* Constants shouldn't reach here. */
826 gcc_assert (op0_mode != VOIDmode);
827 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
828 int in_bits = in_prec;
829 if (HWI_COMPUTABLE_MODE_P (op0_mode))
831 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
832 if (GET_CODE (op) == FLOAT)
833 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
834 else if (GET_CODE (op) == UNSIGNED_FLOAT)
835 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
836 else
837 gcc_unreachable ();
838 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
840 return in_bits <= out_bits;
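/* For example, (float:DF (reg:SI x)) is always exact, since DFmode has a
   53-bit significand and an SImode value needs at most 32 bits, whereas
   (float:DF (reg:DI x)) is exact only when nonzero_bits or
   num_sign_bit_copies show that x fits in the significand.  */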
843 /* Perform some simplifications we can do even if the operands
844 aren't constant. */
845 static rtx
846 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
848 enum rtx_code reversed;
849 rtx temp;
851 switch (code)
853 case NOT:
854 /* (not (not X)) == X. */
855 if (GET_CODE (op) == NOT)
856 return XEXP (op, 0);
858 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
859 comparison is all ones. */
860 if (COMPARISON_P (op)
861 && (mode == BImode || STORE_FLAG_VALUE == -1)
862 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
863 return simplify_gen_relational (reversed, mode, VOIDmode,
864 XEXP (op, 0), XEXP (op, 1));
866 /* (not (plus X -1)) can become (neg X). */
867 if (GET_CODE (op) == PLUS
868 && XEXP (op, 1) == constm1_rtx)
869 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
871 /* Similarly, (not (neg X)) is (plus X -1). */
872 if (GET_CODE (op) == NEG)
873 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
874 CONSTM1_RTX (mode));
876 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
877 if (GET_CODE (op) == XOR
878 && CONST_INT_P (XEXP (op, 1))
879 && (temp = simplify_unary_operation (NOT, mode,
880 XEXP (op, 1), mode)) != 0)
881 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
883 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
884 if (GET_CODE (op) == PLUS
885 && CONST_INT_P (XEXP (op, 1))
886 && mode_signbit_p (mode, XEXP (op, 1))
887 && (temp = simplify_unary_operation (NOT, mode,
888 XEXP (op, 1), mode)) != 0)
889 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
892 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
893 operands other than 1, but that is not valid. We could do a
894 similar simplification for (not (lshiftrt C X)) where C is
895 just the sign bit, but this doesn't seem common enough to
896 bother with. */
897 if (GET_CODE (op) == ASHIFT
898 && XEXP (op, 0) == const1_rtx)
900 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
901 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
904 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
905 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
906 so we can perform the above simplification. */
907 if (STORE_FLAG_VALUE == -1
908 && GET_CODE (op) == ASHIFTRT
909 && CONST_INT_P (XEXP (op, 1))
910 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
911 return simplify_gen_relational (GE, mode, VOIDmode,
912 XEXP (op, 0), const0_rtx);
915 if (GET_CODE (op) == SUBREG
916 && subreg_lowpart_p (op)
917 && (GET_MODE_SIZE (GET_MODE (op))
918 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
919 && GET_CODE (SUBREG_REG (op)) == ASHIFT
920 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
922 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
923 rtx x;
925 x = gen_rtx_ROTATE (inner_mode,
926 simplify_gen_unary (NOT, inner_mode, const1_rtx,
927 inner_mode),
928 XEXP (SUBREG_REG (op), 1));
929 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
930 if (temp)
931 return temp;
934 /* Apply De Morgan's laws to reduce number of patterns for machines
935 with negating logical insns (and-not, nand, etc.). If result has
936 only one NOT, put it first, since that is how the patterns are
937 coded. */
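/* For example, (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)).  */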
938 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
940 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
941 machine_mode op_mode;
943 op_mode = GET_MODE (in1);
944 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
946 op_mode = GET_MODE (in2);
947 if (op_mode == VOIDmode)
948 op_mode = mode;
949 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
951 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
952 std::swap (in1, in2);
954 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
955 mode, in1, in2);
958 /* (not (bswap x)) -> (bswap (not x)). */
959 if (GET_CODE (op) == BSWAP)
961 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
962 return simplify_gen_unary (BSWAP, mode, x, mode);
964 break;
966 case NEG:
967 /* (neg (neg X)) == X. */
968 if (GET_CODE (op) == NEG)
969 return XEXP (op, 0);
971 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
972 If comparison is not reversible use
973 x ? y : (neg y). */
974 if (GET_CODE (op) == IF_THEN_ELSE)
976 rtx cond = XEXP (op, 0);
977 rtx true_rtx = XEXP (op, 1);
978 rtx false_rtx = XEXP (op, 2);
980 if ((GET_CODE (true_rtx) == NEG
981 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
982 || (GET_CODE (false_rtx) == NEG
983 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
985 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
986 temp = reversed_comparison (cond, mode);
987 else
989 temp = cond;
990 std::swap (true_rtx, false_rtx);
992 return simplify_gen_ternary (IF_THEN_ELSE, mode,
993 mode, temp, true_rtx, false_rtx);
997 /* (neg (plus X 1)) can become (not X). */
998 if (GET_CODE (op) == PLUS
999 && XEXP (op, 1) == const1_rtx)
1000 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1002 /* Similarly, (neg (not X)) is (plus X 1). */
1003 if (GET_CODE (op) == NOT)
1004 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1005 CONST1_RTX (mode));
1007 /* (neg (minus X Y)) can become (minus Y X). This transformation
1008 isn't safe for modes with signed zeros, since if X and Y are
1009 both +0, (minus Y X) is the same as (minus X Y). If the
1010 rounding mode is towards +infinity (or -infinity) then the two
1011 expressions will be rounded differently. */
1012 if (GET_CODE (op) == MINUS
1013 && !HONOR_SIGNED_ZEROS (mode)
1014 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1015 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1017 if (GET_CODE (op) == PLUS
1018 && !HONOR_SIGNED_ZEROS (mode)
1019 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1021 /* (neg (plus A C)) is simplified to (minus -C A). */
1022 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1023 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1025 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1026 if (temp)
1027 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1030 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1031 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1032 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1035 /* (neg (mult A B)) becomes (mult A (neg B)).
1036 This works even for floating-point values. */
1037 if (GET_CODE (op) == MULT
1038 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1040 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1041 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1044 /* NEG commutes with ASHIFT since it is multiplication. Only do
1045 this if we can then eliminate the NEG (e.g., if the operand
1046 is a constant). */
1047 if (GET_CODE (op) == ASHIFT)
1049 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1050 if (temp)
1051 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1054 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1055 C is equal to the width of MODE minus 1. */
1056 if (GET_CODE (op) == ASHIFTRT
1057 && CONST_INT_P (XEXP (op, 1))
1058 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1059 return simplify_gen_binary (LSHIFTRT, mode,
1060 XEXP (op, 0), XEXP (op, 1));
1062 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1063 C is equal to the width of MODE minus 1. */
1064 if (GET_CODE (op) == LSHIFTRT
1065 && CONST_INT_P (XEXP (op, 1))
1066 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1067 return simplify_gen_binary (ASHIFTRT, mode,
1068 XEXP (op, 0), XEXP (op, 1));
1070 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1071 if (GET_CODE (op) == XOR
1072 && XEXP (op, 1) == const1_rtx
1073 && nonzero_bits (XEXP (op, 0), mode) == 1)
1074 return plus_constant (mode, XEXP (op, 0), -1);
1076 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1077 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1078 if (GET_CODE (op) == LT
1079 && XEXP (op, 1) == const0_rtx
1080 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1082 machine_mode inner = GET_MODE (XEXP (op, 0));
1083 int isize = GET_MODE_PRECISION (inner);
1084 if (STORE_FLAG_VALUE == 1)
1086 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1087 GEN_INT (isize - 1));
1088 if (mode == inner)
1089 return temp;
1090 if (GET_MODE_PRECISION (mode) > isize)
1091 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1092 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1094 else if (STORE_FLAG_VALUE == -1)
1096 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1097 GEN_INT (isize - 1));
1098 if (mode == inner)
1099 return temp;
1100 if (GET_MODE_PRECISION (mode) > isize)
1101 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1102 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1105 break;
1107 case TRUNCATE:
1108 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1109 with the umulXi3_highpart patterns. */
1110 if (GET_CODE (op) == LSHIFTRT
1111 && GET_CODE (XEXP (op, 0)) == MULT)
1112 break;
1114 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1116 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1118 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1119 if (temp)
1120 return temp;
1122 /* We can't handle truncation to a partial integer mode here
1123 because we don't know the real bitsize of the partial
1124 integer mode. */
1125 break;
1128 if (GET_MODE (op) != VOIDmode)
1130 temp = simplify_truncation (mode, op, GET_MODE (op));
1131 if (temp)
1132 return temp;
1135 /* If we know that the value is already truncated, we can
1136 replace the TRUNCATE with a SUBREG. */
1137 if (GET_MODE_NUNITS (mode) == 1
1138 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1139 || truncated_to_mode (mode, op)))
1141 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1142 if (temp)
1143 return temp;
1146 /* A truncate of a comparison can be replaced with a subreg if
1147 STORE_FLAG_VALUE permits. This is like the previous test,
1148 but it works even if the comparison is done in a mode larger
1149 than HOST_BITS_PER_WIDE_INT. */
1150 if (HWI_COMPUTABLE_MODE_P (mode)
1151 && COMPARISON_P (op)
1152 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1154 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1155 if (temp)
1156 return temp;
1159 /* A truncate of a memory is just loading the low part of the memory
1160 if we are not changing the meaning of the address. */
1161 if (GET_CODE (op) == MEM
1162 && !VECTOR_MODE_P (mode)
1163 && !MEM_VOLATILE_P (op)
1164 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1166 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1167 if (temp)
1168 return temp;
1171 break;
1173 case FLOAT_TRUNCATE:
1174 if (DECIMAL_FLOAT_MODE_P (mode))
1175 break;
1177 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1178 if (GET_CODE (op) == FLOAT_EXTEND
1179 && GET_MODE (XEXP (op, 0)) == mode)
1180 return XEXP (op, 0);
1182 /* (float_truncate:SF (float_truncate:DF foo:XF))
1183 = (float_truncate:SF foo:XF).
1184 This may eliminate double rounding, so it is unsafe.
1186 (float_truncate:SF (float_extend:XF foo:DF))
1187 = (float_truncate:SF foo:DF).
1189 (float_truncate:DF (float_extend:XF foo:SF))
1190 = (float_extend:DF foo:SF). */
1191 if ((GET_CODE (op) == FLOAT_TRUNCATE
1192 && flag_unsafe_math_optimizations)
1193 || GET_CODE (op) == FLOAT_EXTEND)
1194 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
1195 > GET_MODE_SIZE (mode)
1196 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1197 mode,
1198 XEXP (op, 0),
1199 mode);
1201 /* (float_truncate (float x)) is (float x) */
1202 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1203 && (flag_unsafe_math_optimizations
1204 || exact_int_to_float_conversion_p (op)))
1205 return simplify_gen_unary (GET_CODE (op), mode,
1206 XEXP (op, 0),
1207 GET_MODE (XEXP (op, 0)));
1209 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1210 (OP:SF foo:SF) if OP is NEG or ABS. */
1211 if ((GET_CODE (op) == ABS
1212 || GET_CODE (op) == NEG)
1213 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1214 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1215 return simplify_gen_unary (GET_CODE (op), mode,
1216 XEXP (XEXP (op, 0), 0), mode);
1218 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1219 is (float_truncate:SF x). */
1220 if (GET_CODE (op) == SUBREG
1221 && subreg_lowpart_p (op)
1222 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1223 return SUBREG_REG (op);
1224 break;
1226 case FLOAT_EXTEND:
1227 if (DECIMAL_FLOAT_MODE_P (mode))
1228 break;
1230 /* (float_extend (float_extend x)) is (float_extend x)
1232 (float_extend (float x)) is (float x) assuming that double
1233 rounding can't happen.
1234 */
1235 if (GET_CODE (op) == FLOAT_EXTEND
1236 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1237 && exact_int_to_float_conversion_p (op)))
1238 return simplify_gen_unary (GET_CODE (op), mode,
1239 XEXP (op, 0),
1240 GET_MODE (XEXP (op, 0)));
1242 break;
1244 case ABS:
1245 /* (abs (neg <foo>)) -> (abs <foo>) */
1246 if (GET_CODE (op) == NEG)
1247 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1248 GET_MODE (XEXP (op, 0)));
1250 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1251 do nothing. */
1252 if (GET_MODE (op) == VOIDmode)
1253 break;
1255 /* If operand is something known to be positive, ignore the ABS. */
1256 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1257 || val_signbit_known_clear_p (GET_MODE (op),
1258 nonzero_bits (op, GET_MODE (op))))
1259 return op;
1261 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1262 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1263 return gen_rtx_NEG (mode, op);
1265 break;
1267 case FFS:
1268 /* (ffs (*_extend <X>)) = (ffs <X>) */
1269 if (GET_CODE (op) == SIGN_EXTEND
1270 || GET_CODE (op) == ZERO_EXTEND)
1271 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1272 GET_MODE (XEXP (op, 0)));
1273 break;
1275 case POPCOUNT:
1276 switch (GET_CODE (op))
1278 case BSWAP:
1279 case ZERO_EXTEND:
1280 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1281 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1282 GET_MODE (XEXP (op, 0)));
1284 case ROTATE:
1285 case ROTATERT:
1286 /* Rotations don't affect popcount. */
1287 if (!side_effects_p (XEXP (op, 1)))
1288 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1290 break;
1292 default:
1293 break;
1295 break;
1297 case PARITY:
1298 switch (GET_CODE (op))
1300 case NOT:
1301 case BSWAP:
1302 case ZERO_EXTEND:
1303 case SIGN_EXTEND:
1304 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1305 GET_MODE (XEXP (op, 0)));
1307 case ROTATE:
1308 case ROTATERT:
1309 /* Rotations don't affect parity. */
1310 if (!side_effects_p (XEXP (op, 1)))
1311 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1312 GET_MODE (XEXP (op, 0)));
1313 break;
1315 default:
1316 break;
1318 break;
1320 case BSWAP:
1321 /* (bswap (bswap x)) -> x. */
1322 if (GET_CODE (op) == BSWAP)
1323 return XEXP (op, 0);
1324 break;
1326 case FLOAT:
1327 /* (float (sign_extend <X>)) = (float <X>). */
1328 if (GET_CODE (op) == SIGN_EXTEND)
1329 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1330 GET_MODE (XEXP (op, 0)));
1331 break;
1333 case SIGN_EXTEND:
1334 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1335 becomes just the MINUS if its mode is MODE. This allows
1336 folding switch statements on machines using casesi (such as
1337 the VAX). */
1338 if (GET_CODE (op) == TRUNCATE
1339 && GET_MODE (XEXP (op, 0)) == mode
1340 && GET_CODE (XEXP (op, 0)) == MINUS
1341 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1342 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1343 return XEXP (op, 0);
1345 /* Extending a widening multiplication should be canonicalized to
1346 a wider widening multiplication. */
1347 if (GET_CODE (op) == MULT)
1349 rtx lhs = XEXP (op, 0);
1350 rtx rhs = XEXP (op, 1);
1351 enum rtx_code lcode = GET_CODE (lhs);
1352 enum rtx_code rcode = GET_CODE (rhs);
1354 /* Widening multiplies usually extend both operands, but sometimes
1355 they use a shift to extract a portion of a register. */
1356 if ((lcode == SIGN_EXTEND
1357 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1358 && (rcode == SIGN_EXTEND
1359 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1361 machine_mode lmode = GET_MODE (lhs);
1362 machine_mode rmode = GET_MODE (rhs);
1363 int bits;
1365 if (lcode == ASHIFTRT)
1366 /* Number of bits not shifted off the end. */
1367 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1368 else /* lcode == SIGN_EXTEND */
1369 /* Size of inner mode. */
1370 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1372 if (rcode == ASHIFTRT)
1373 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1374 else /* rcode == SIGN_EXTEND */
1375 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1377 /* We can only widen multiplies if the result is mathematically
1378 equivalent, i.e. if overflow was impossible. */
1379 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1380 return simplify_gen_binary
1381 (MULT, mode,
1382 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1383 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1387 /* Check for a sign extension of a subreg of a promoted
1388 variable, where the promotion is sign-extended, and the
1389 target mode is the same as the variable's promotion. */
1390 if (GET_CODE (op) == SUBREG
1391 && SUBREG_PROMOTED_VAR_P (op)
1392 && SUBREG_PROMOTED_SIGNED_P (op)
1393 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1395 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1396 if (temp)
1397 return temp;
1400 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1401 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1402 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1404 gcc_assert (GET_MODE_PRECISION (mode)
1405 > GET_MODE_PRECISION (GET_MODE (op)));
1406 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1407 GET_MODE (XEXP (op, 0)));
1410 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1411 is (sign_extend:M (subreg:O <X>)) if there is mode with
1412 GET_MODE_BITSIZE (N) - I bits.
1413 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1414 is similarly (zero_extend:M (subreg:O <X>)). */
1415 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1416 && GET_CODE (XEXP (op, 0)) == ASHIFT
1417 && CONST_INT_P (XEXP (op, 1))
1418 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1419 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1421 machine_mode tmode
1422 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1423 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1424 gcc_assert (GET_MODE_BITSIZE (mode)
1425 > GET_MODE_BITSIZE (GET_MODE (op)));
1426 if (tmode != BLKmode)
1428 rtx inner =
1429 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1430 if (inner)
1431 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1432 ? SIGN_EXTEND : ZERO_EXTEND,
1433 mode, inner, tmode);
1437 #if defined(POINTERS_EXTEND_UNSIGNED)
1438 /* As we do not know which address space the pointer is referring to,
1439 we can do this only if the target does not support different pointer
1440 or address modes depending on the address space. */
1441 if (target_default_pointer_address_modes_p ()
1442 && ! POINTERS_EXTEND_UNSIGNED
1443 && mode == Pmode && GET_MODE (op) == ptr_mode
1444 && (CONSTANT_P (op)
1445 || (GET_CODE (op) == SUBREG
1446 && REG_P (SUBREG_REG (op))
1447 && REG_POINTER (SUBREG_REG (op))
1448 && GET_MODE (SUBREG_REG (op)) == Pmode))
1449 && !targetm.have_ptr_extend ())
1450 return convert_memory_address (Pmode, op);
1451 #endif
1452 break;
1454 case ZERO_EXTEND:
1455 /* Check for a zero extension of a subreg of a promoted
1456 variable, where the promotion is zero-extended, and the
1457 target mode is the same as the variable's promotion. */
1458 if (GET_CODE (op) == SUBREG
1459 && SUBREG_PROMOTED_VAR_P (op)
1460 && SUBREG_PROMOTED_UNSIGNED_P (op)
1461 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1463 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1464 if (temp)
1465 return temp;
1468 /* Extending a widening multiplication should be canonicalized to
1469 a wider widening multiplication. */
1470 if (GET_CODE (op) == MULT)
1472 rtx lhs = XEXP (op, 0);
1473 rtx rhs = XEXP (op, 1);
1474 enum rtx_code lcode = GET_CODE (lhs);
1475 enum rtx_code rcode = GET_CODE (rhs);
1477 /* Widening multiplies usually extend both operands, but sometimes
1478 they use a shift to extract a portion of a register. */
1479 if ((lcode == ZERO_EXTEND
1480 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1481 && (rcode == ZERO_EXTEND
1482 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1484 machine_mode lmode = GET_MODE (lhs);
1485 machine_mode rmode = GET_MODE (rhs);
1486 int bits;
1488 if (lcode == LSHIFTRT)
1489 /* Number of bits not shifted off the end. */
1490 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1491 else /* lcode == ZERO_EXTEND */
1492 /* Size of inner mode. */
1493 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1495 if (rcode == LSHIFTRT)
1496 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1497 else /* rcode == ZERO_EXTEND */
1498 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1500 /* We can only widen multiplies if the result is mathematically
1501 equivalent, i.e. if overflow was impossible. */
1502 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1503 return simplify_gen_binary
1504 (MULT, mode,
1505 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1506 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1510 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1511 if (GET_CODE (op) == ZERO_EXTEND)
1512 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1513 GET_MODE (XEXP (op, 0)));
1515 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1516 is (zero_extend:M (subreg:O <X>)) if there is mode with
1517 GET_MODE_PRECISION (N) - I bits. */
1518 if (GET_CODE (op) == LSHIFTRT
1519 && GET_CODE (XEXP (op, 0)) == ASHIFT
1520 && CONST_INT_P (XEXP (op, 1))
1521 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1522 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1524 machine_mode tmode
1525 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1526 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1527 if (tmode != BLKmode)
1529 rtx inner =
1530 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1531 if (inner)
1532 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1536 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1537 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1538 of mode N. E.g.
1539 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1540 (and:SI (reg:SI) (const_int 63)). */
1541 if (GET_CODE (op) == SUBREG
1542 && GET_MODE_PRECISION (GET_MODE (op))
1543 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1544 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1545 <= HOST_BITS_PER_WIDE_INT
1546 && GET_MODE_PRECISION (mode)
1547 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1548 && subreg_lowpart_p (op)
1549 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1550 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1552 if (GET_MODE_PRECISION (mode)
1553 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1554 return SUBREG_REG (op);
1555 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1556 GET_MODE (SUBREG_REG (op)));
1559 #if defined(POINTERS_EXTEND_UNSIGNED)
1560 /* As we do not know which address space the pointer is referring to,
1561 we can do this only if the target does not support different pointer
1562 or address modes depending on the address space. */
1563 if (target_default_pointer_address_modes_p ()
1564 && POINTERS_EXTEND_UNSIGNED > 0
1565 && mode == Pmode && GET_MODE (op) == ptr_mode
1566 && (CONSTANT_P (op)
1567 || (GET_CODE (op) == SUBREG
1568 && REG_P (SUBREG_REG (op))
1569 && REG_POINTER (SUBREG_REG (op))
1570 && GET_MODE (SUBREG_REG (op)) == Pmode))
1571 && !targetm.have_ptr_extend ())
1572 return convert_memory_address (Pmode, op);
1573 #endif
1574 break;
1576 default:
1577 break;
1580 return 0;
1583 /* Try to compute the value of a unary operation CODE whose output mode is to
1584 be MODE with input operand OP whose mode was originally OP_MODE.
1585 Return zero if the value cannot be computed. */
1586 rtx
1587 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1588 rtx op, machine_mode op_mode)
1590 unsigned int width = GET_MODE_PRECISION (mode);
1592 if (code == VEC_DUPLICATE)
1594 gcc_assert (VECTOR_MODE_P (mode));
1595 if (GET_MODE (op) != VOIDmode)
1597 if (!VECTOR_MODE_P (GET_MODE (op)))
1598 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1599 else
1600 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1601 (GET_MODE (op)));
1603 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1604 || GET_CODE (op) == CONST_VECTOR)
1606 int elt_size = GET_MODE_UNIT_SIZE (mode);
1607 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1608 rtvec v = rtvec_alloc (n_elts);
1609 unsigned int i;
1611 if (GET_CODE (op) != CONST_VECTOR)
1612 for (i = 0; i < n_elts; i++)
1613 RTVEC_ELT (v, i) = op;
1614 else
1616 machine_mode inmode = GET_MODE (op);
1617 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1618 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1620 gcc_assert (in_n_elts < n_elts);
1621 gcc_assert ((n_elts % in_n_elts) == 0);
1622 for (i = 0; i < n_elts; i++)
1623 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1625 return gen_rtx_CONST_VECTOR (mode, v);
1629 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1631 int elt_size = GET_MODE_UNIT_SIZE (mode);
1632 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1633 machine_mode opmode = GET_MODE (op);
1634 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1635 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1636 rtvec v = rtvec_alloc (n_elts);
1637 unsigned int i;
1639 gcc_assert (op_n_elts == n_elts);
1640 for (i = 0; i < n_elts; i++)
1642 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1643 CONST_VECTOR_ELT (op, i),
1644 GET_MODE_INNER (opmode));
1645 if (!x)
1646 return 0;
1647 RTVEC_ELT (v, i) = x;
1649 return gen_rtx_CONST_VECTOR (mode, v);
1652 /* The order of these tests is critical so that, for example, we don't
1653 check the wrong mode (input vs. output) for a conversion operation,
1654 such as FIX. At some point, this should be simplified. */
1656 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1658 REAL_VALUE_TYPE d;
1660 if (op_mode == VOIDmode)
1662 /* CONST_INTs have VOIDmode as their mode. We assume that all
1663 the bits of the constant are significant, though this is a
1664 dangerous assumption: CONST_INTs are often created and used
1665 with garbage in the bits outside of the precision of the
1666 implied mode of the const_int. */
1667 op_mode = MAX_MODE_INT;
1670 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1671 d = real_value_truncate (mode, d);
1672 return const_double_from_real_value (d, mode);
1674 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1676 REAL_VALUE_TYPE d;
1678 if (op_mode == VOIDmode)
1680 /* CONST_INTs have VOIDmode as their mode. We assume that all
1681 the bits of the constant are significant, though this is a
1682 dangerous assumption: CONST_INTs are often created and used
1683 with garbage in the bits outside of the precision of the
1684 implied mode of the const_int. */
1685 op_mode = MAX_MODE_INT;
1688 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1689 d = real_value_truncate (mode, d);
1690 return const_double_from_real_value (d, mode);
1693 if (CONST_SCALAR_INT_P (op) && width > 0)
1695 wide_int result;
1696 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1697 rtx_mode_t op0 = std::make_pair (op, imode);
1698 int int_value;
1700 #if TARGET_SUPPORTS_WIDE_INT == 0
1701 /* This assert keeps the simplification from producing a result
1702 that cannot be represented in a CONST_DOUBLE, but a lot of
1703 upstream callers expect that this function never fails to
1704 simplify something, so if this check were added to the test
1705 above, the code would just die later anyway. If this assert
1706 fires, you need to make the port support wide ints. */
1707 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1708 #endif
1710 switch (code)
1712 case NOT:
1713 result = wi::bit_not (op0);
1714 break;
1716 case NEG:
1717 result = wi::neg (op0);
1718 break;
1720 case ABS:
1721 result = wi::abs (op0);
1722 break;
1724 case FFS:
1725 result = wi::shwi (wi::ffs (op0), mode);
1726 break;
1728 case CLZ:
1729 if (wi::ne_p (op0, 0))
1730 int_value = wi::clz (op0);
1731 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1732 int_value = GET_MODE_PRECISION (mode);
1733 result = wi::shwi (int_value, mode);
1734 break;
1736 case CLRSB:
1737 result = wi::shwi (wi::clrsb (op0), mode);
1738 break;
1740 case CTZ:
1741 if (wi::ne_p (op0, 0))
1742 int_value = wi::ctz (op0);
1743 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1744 int_value = GET_MODE_PRECISION (mode);
1745 result = wi::shwi (int_value, mode);
1746 break;
1748 case POPCOUNT:
1749 result = wi::shwi (wi::popcount (op0), mode);
1750 break;
1752 case PARITY:
1753 result = wi::shwi (wi::parity (op0), mode);
1754 break;
1756 case BSWAP:
1757 result = wide_int (op0).bswap ();
1758 break;
1760 case TRUNCATE:
1761 case ZERO_EXTEND:
1762 result = wide_int::from (op0, width, UNSIGNED);
1763 break;
1765 case SIGN_EXTEND:
1766 result = wide_int::from (op0, width, SIGNED);
1767 break;
1769 case SQRT:
1770 default:
1771 return 0;
1774 return immed_wide_int_const (result, mode);
1777 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1778 && SCALAR_FLOAT_MODE_P (mode)
1779 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1781 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1782 switch (code)
1784 case SQRT:
1785 return 0;
1786 case ABS:
1787 d = real_value_abs (&d);
1788 break;
1789 case NEG:
1790 d = real_value_negate (&d);
1791 break;
1792 case FLOAT_TRUNCATE:
1793 d = real_value_truncate (mode, d);
1794 break;
1795 case FLOAT_EXTEND:
1796 /* All this does is change the mode, unless changing
1797 mode class. */
1798 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1799 real_convert (&d, mode, &d);
1800 break;
1801 case FIX:
1802 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1803 break;
1804 case NOT:
1806 long tmp[4];
1807 int i;
1809 real_to_target (tmp, &d, GET_MODE (op));
1810 for (i = 0; i < 4; i++)
1811 tmp[i] = ~tmp[i];
1812 real_from_target (&d, tmp, mode);
1813 break;
1815 default:
1816 gcc_unreachable ();
1818 return const_double_from_real_value (d, mode);
1820 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1821 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1822 && GET_MODE_CLASS (mode) == MODE_INT
1823 && width > 0)
1825 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1826 operators are intentionally left unspecified (to ease implementation
1827 by target backends), for consistency, this routine implements the
1828 same semantics for constant folding as used by the middle-end. */
1830 /* This was formerly used only for non-IEEE float.
1831 eggert@twinsun.com says it is safe for IEEE also. */
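/* Illustrative example (values assumed, not from the original source):
   with 32-bit SImode, (fix:SI (const_double 1.0e10)) saturates to
   (const_int 2147483647), while (unsigned_fix:SI (const_double -1.5))
   folds to (const_int 0).  */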
1832 REAL_VALUE_TYPE t;
1833 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1834 wide_int wmax, wmin;
1835 /* This is part of the interface to real_to_integer, but we check
1836 things before making this call. */
1837 bool fail;
1839 switch (code)
1841 case FIX:
1842 if (REAL_VALUE_ISNAN (*x))
1843 return const0_rtx;
1845 /* Test against the signed upper bound. */
1846 wmax = wi::max_value (width, SIGNED);
1847 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1848 if (real_less (&t, x))
1849 return immed_wide_int_const (wmax, mode);
1851 /* Test against the signed lower bound. */
1852 wmin = wi::min_value (width, SIGNED);
1853 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1854 if (real_less (x, &t))
1855 return immed_wide_int_const (wmin, mode);
1857 return immed_wide_int_const (real_to_integer (x, &fail, width),
1858 mode);
1860 case UNSIGNED_FIX:
1861 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1862 return const0_rtx;
1864 /* Test against the unsigned upper bound. */
1865 wmax = wi::max_value (width, UNSIGNED);
1866 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1867 if (real_less (&t, x))
1868 return immed_wide_int_const (wmax, mode);
1870 return immed_wide_int_const (real_to_integer (x, &fail, width),
1871 mode);
1873 default:
1874 gcc_unreachable ();
1878 return NULL_RTX;
1881 /* Subroutine of simplify_binary_operation to simplify a binary operation
1882 CODE that can commute with byte swapping, with result mode MODE and
1883 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1884 Return zero if no simplification or canonicalization is possible. */
1886 static rtx
1887 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1888 rtx op0, rtx op1)
1890 rtx tem;
1892 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
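/* Illustrative example, assuming SImode operands:
   (ior:SI (bswap:SI x) (const_int 0x00ff0000)) can become
   (bswap:SI (ior:SI x (const_int 0x0000ff00))), because the constant
   is byte-swapped to compensate.  */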
1893 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1895 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1896 simplify_gen_unary (BSWAP, mode, op1, mode));
1897 return simplify_gen_unary (BSWAP, mode, tem, mode);
1900 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1901 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1903 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1904 return simplify_gen_unary (BSWAP, mode, tem, mode);
1907 return NULL_RTX;
1910 /* Subroutine of simplify_binary_operation to simplify a commutative,
1911 associative binary operation CODE with result mode MODE, operating
1912 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1913 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1914 canonicalization is possible. */
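/* Illustrative example (operand names assumed):
   (plus:SI (plus:SI (reg a) (const_int 16)) (reg b)) is canonicalized
   here to (plus:SI (plus:SI (reg a) (reg b)) (const_int 16)), keeping
   the constant outermost.  */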
1916 static rtx
1917 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1918 rtx op0, rtx op1)
1920 rtx tem;
1922 /* Linearize the operator to the left. */
1923 if (GET_CODE (op1) == code)
1925 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1926 if (GET_CODE (op0) == code)
1928 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1929 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1932 /* "a op (b op c)" becomes "(b op c) op a". */
1933 if (! swap_commutative_operands_p (op1, op0))
1934 return simplify_gen_binary (code, mode, op1, op0);
1936 std::swap (op0, op1);
1939 if (GET_CODE (op0) == code)
1941 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1942 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1944 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1945 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1948 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1949 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1950 if (tem != 0)
1951 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1953 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1954 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1955 if (tem != 0)
1956 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1959 return 0;
1963 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1964 and OP1. Return 0 if no simplification is possible.
1966 Don't use this for relational operations such as EQ or LT.
1967 Use simplify_relational_operation instead. */
1969 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1970 rtx op0, rtx op1)
1972 rtx trueop0, trueop1;
1973 rtx tem;
1975 /* Relational operations don't work here. We must know the mode
1976 of the operands in order to do the comparison correctly.
1977 Assuming a full word can give incorrect results.
1978 Consider comparing 128 with -128 in QImode. */
1979 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1980 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1982 /* Make sure the constant is second. */
1983 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1984 && swap_commutative_operands_p (op0, op1))
1985 std::swap (op0, op1);
1987 trueop0 = avoid_constant_pool_reference (op0);
1988 trueop1 = avoid_constant_pool_reference (op1);
1990 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1991 if (tem)
1992 return tem;
1993 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1995 if (tem)
1996 return tem;
1998 /* If the above steps did not result in a simplification and op0 or op1
1999 were constant pool references, use the referenced constants directly. */
2000 if (trueop0 != op0 || trueop1 != op1)
2001 return simplify_gen_binary (code, mode, trueop0, trueop1);
2003 return NULL_RTX;
2006 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2007 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2008 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2009 actual constants. */
2011 static rtx
2012 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2013 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2015 rtx tem, reversed, opleft, opright;
2016 HOST_WIDE_INT val;
2017 unsigned int width = GET_MODE_PRECISION (mode);
2019 /* Even if we can't compute a constant result,
2020 there are some cases worth simplifying. */
2022 switch (code)
2024 case PLUS:
2025 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2026 when x is NaN, infinite, or finite and nonzero. They aren't
2027 when x is -0 and the rounding mode is not towards -infinity,
2028 since (-0) + 0 is then 0. */
2029 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2030 return op0;
2032 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2033 transformations are safe even for IEEE. */
2034 if (GET_CODE (op0) == NEG)
2035 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2036 else if (GET_CODE (op1) == NEG)
2037 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2039 /* (~a) + 1 -> -a */
2040 if (INTEGRAL_MODE_P (mode)
2041 && GET_CODE (op0) == NOT
2042 && trueop1 == const1_rtx)
2043 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2045 /* Handle both-operands-constant cases. We can only add
2046 CONST_INTs to constants since the sum of relocatable symbols
2047 can't be handled by most assemblers. Don't add CONST_INT
2048 to CONST_INT since overflow won't be computed properly if wider
2049 than HOST_BITS_PER_WIDE_INT. */
2051 if ((GET_CODE (op0) == CONST
2052 || GET_CODE (op0) == SYMBOL_REF
2053 || GET_CODE (op0) == LABEL_REF)
2054 && CONST_INT_P (op1))
2055 return plus_constant (mode, op0, INTVAL (op1));
2056 else if ((GET_CODE (op1) == CONST
2057 || GET_CODE (op1) == SYMBOL_REF
2058 || GET_CODE (op1) == LABEL_REF)
2059 && CONST_INT_P (op0))
2060 return plus_constant (mode, op1, INTVAL (op0));
2062 /* See if this is something like X * C - X or vice versa or
2063 if the multiplication is written as a shift. If so, we can
2064 distribute and make a new multiply, shift, or maybe just
2065 have X (if C is 2 in the example above). But don't make
2066 something more expensive than we had before. */
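/* Illustrative example, assuming SImode: (plus:SI (ashift:SI x
   (const_int 2)) x) is x*4 + x and can become (mult:SI x (const_int 5)),
   provided the new form is not judged more expensive.  */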
2068 if (SCALAR_INT_MODE_P (mode))
2070 rtx lhs = op0, rhs = op1;
2072 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2073 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2075 if (GET_CODE (lhs) == NEG)
2077 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2078 lhs = XEXP (lhs, 0);
2080 else if (GET_CODE (lhs) == MULT
2081 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2083 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2084 lhs = XEXP (lhs, 0);
2086 else if (GET_CODE (lhs) == ASHIFT
2087 && CONST_INT_P (XEXP (lhs, 1))
2088 && INTVAL (XEXP (lhs, 1)) >= 0
2089 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2091 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2092 GET_MODE_PRECISION (mode));
2093 lhs = XEXP (lhs, 0);
2096 if (GET_CODE (rhs) == NEG)
2098 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2099 rhs = XEXP (rhs, 0);
2101 else if (GET_CODE (rhs) == MULT
2102 && CONST_INT_P (XEXP (rhs, 1)))
2104 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2105 rhs = XEXP (rhs, 0);
2107 else if (GET_CODE (rhs) == ASHIFT
2108 && CONST_INT_P (XEXP (rhs, 1))
2109 && INTVAL (XEXP (rhs, 1)) >= 0
2110 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2112 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2113 GET_MODE_PRECISION (mode));
2114 rhs = XEXP (rhs, 0);
2117 if (rtx_equal_p (lhs, rhs))
2119 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2120 rtx coeff;
2121 bool speed = optimize_function_for_speed_p (cfun);
2123 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2125 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2126 return (set_src_cost (tem, mode, speed)
2127 <= set_src_cost (orig, mode, speed) ? tem : 0);
2131 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2132 if (CONST_SCALAR_INT_P (op1)
2133 && GET_CODE (op0) == XOR
2134 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2135 && mode_signbit_p (mode, op1))
2136 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2137 simplify_gen_binary (XOR, mode, op1,
2138 XEXP (op0, 1)));
2140 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2141 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2142 && GET_CODE (op0) == MULT
2143 && GET_CODE (XEXP (op0, 0)) == NEG)
2145 rtx in1, in2;
2147 in1 = XEXP (XEXP (op0, 0), 0);
2148 in2 = XEXP (op0, 1);
2149 return simplify_gen_binary (MINUS, mode, op1,
2150 simplify_gen_binary (MULT, mode,
2151 in1, in2));
2154 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2155 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2156 is 1. */
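/* Illustrative example, assuming STORE_FLAG_VALUE == 1:
   (plus:SI (eq:SI x y) (const_int -1)) can become
   (neg:SI (ne:SI x y)).  */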
2157 if (COMPARISON_P (op0)
2158 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2159 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2160 && (reversed = reversed_comparison (op0, mode)))
2161 return
2162 simplify_gen_unary (NEG, mode, reversed, mode);
2164 /* If one of the operands is a PLUS or a MINUS, see if we can
2165 simplify this by the associative law.
2166 Don't use the associative law for floating point.
2167 The inaccuracy makes it nonassociative,
2168 and subtle programs can break if operations are associated. */
2170 if (INTEGRAL_MODE_P (mode)
2171 && (plus_minus_operand_p (op0)
2172 || plus_minus_operand_p (op1))
2173 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2174 return tem;
2176 /* Reassociate floating point addition only when the user
2177 specifies associative math operations. */
2178 if (FLOAT_MODE_P (mode)
2179 && flag_associative_math)
2181 tem = simplify_associative_operation (code, mode, op0, op1);
2182 if (tem)
2183 return tem;
2185 break;
2187 case COMPARE:
2188 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2189 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2190 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2191 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2193 rtx xop00 = XEXP (op0, 0);
2194 rtx xop10 = XEXP (op1, 0);
2196 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2197 return xop00;
2199 if (REG_P (xop00) && REG_P (xop10)
2200 && GET_MODE (xop00) == GET_MODE (xop10)
2201 && REGNO (xop00) == REGNO (xop10)
2202 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2203 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2204 return xop00;
2206 break;
2208 case MINUS:
2209 /* We can't assume x-x is 0 even with non-IEEE floating point,
2210 but since it is zero except in very strange circumstances, we
2211 will treat it as zero with -ffinite-math-only. */
2212 if (rtx_equal_p (trueop0, trueop1)
2213 && ! side_effects_p (op0)
2214 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2215 return CONST0_RTX (mode);
2217 /* Change subtraction from zero into negation. (0 - x) is the
2218 same as -x when x is NaN, infinite, or finite and nonzero.
2219 But if the mode has signed zeros, and does not round towards
2220 -infinity, then 0 - 0 is 0, not -0. */
2221 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2222 return simplify_gen_unary (NEG, mode, op1, mode);
2224 /* (-1 - a) is ~a. */
2225 if (trueop0 == constm1_rtx)
2226 return simplify_gen_unary (NOT, mode, op1, mode);
2228 /* Subtracting 0 has no effect unless the mode has signed zeros
2229 and supports rounding towards -infinity. In such a case,
2230 0 - 0 is -0. */
2231 if (!(HONOR_SIGNED_ZEROS (mode)
2232 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2233 && trueop1 == CONST0_RTX (mode))
2234 return op0;
2236 /* See if this is something like X * C - X or vice versa or
2237 if the multiplication is written as a shift. If so, we can
2238 distribute and make a new multiply, shift, or maybe just
2239 have X (if C is 2 in the example above). But don't make
2240 something more expensive than we had before. */
2242 if (SCALAR_INT_MODE_P (mode))
2244 rtx lhs = op0, rhs = op1;
2246 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2247 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2249 if (GET_CODE (lhs) == NEG)
2251 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2252 lhs = XEXP (lhs, 0);
2254 else if (GET_CODE (lhs) == MULT
2255 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2257 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2258 lhs = XEXP (lhs, 0);
2260 else if (GET_CODE (lhs) == ASHIFT
2261 && CONST_INT_P (XEXP (lhs, 1))
2262 && INTVAL (XEXP (lhs, 1)) >= 0
2263 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2265 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2266 GET_MODE_PRECISION (mode));
2267 lhs = XEXP (lhs, 0);
2270 if (GET_CODE (rhs) == NEG)
2272 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2273 rhs = XEXP (rhs, 0);
2275 else if (GET_CODE (rhs) == MULT
2276 && CONST_INT_P (XEXP (rhs, 1)))
2278 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2279 rhs = XEXP (rhs, 0);
2281 else if (GET_CODE (rhs) == ASHIFT
2282 && CONST_INT_P (XEXP (rhs, 1))
2283 && INTVAL (XEXP (rhs, 1)) >= 0
2284 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2286 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2287 GET_MODE_PRECISION (mode));
2288 negcoeff1 = -negcoeff1;
2289 rhs = XEXP (rhs, 0);
2292 if (rtx_equal_p (lhs, rhs))
2294 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2295 rtx coeff;
2296 bool speed = optimize_function_for_speed_p (cfun);
2298 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2300 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2301 return (set_src_cost (tem, mode, speed)
2302 <= set_src_cost (orig, mode, speed) ? tem : 0);
2306 /* (a - (-b)) -> (a + b). True even for IEEE. */
2307 if (GET_CODE (op1) == NEG)
2308 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2310 /* (-x - c) may be simplified as (-c - x). */
2311 if (GET_CODE (op0) == NEG
2312 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2314 tem = simplify_unary_operation (NEG, mode, op1, mode);
2315 if (tem)
2316 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2319 /* Don't let a relocatable value get a negative coeff. */
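/* Illustrative example: (minus:SI (symbol_ref) (const_int 4)) is
   rewritten as (plus:SI (symbol_ref) (const_int -4)).  */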
2320 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2321 return simplify_gen_binary (PLUS, mode,
2322 op0,
2323 neg_const_int (mode, op1));
2325 /* (x - (x & y)) -> (x & ~y) */
2326 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2328 if (rtx_equal_p (op0, XEXP (op1, 0)))
2330 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2331 GET_MODE (XEXP (op1, 1)));
2332 return simplify_gen_binary (AND, mode, op0, tem);
2334 if (rtx_equal_p (op0, XEXP (op1, 1)))
2336 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2337 GET_MODE (XEXP (op1, 0)));
2338 return simplify_gen_binary (AND, mode, op0, tem);
2342 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2343 by reversing the comparison code if valid. */
2344 if (STORE_FLAG_VALUE == 1
2345 && trueop0 == const1_rtx
2346 && COMPARISON_P (op1)
2347 && (reversed = reversed_comparison (op1, mode)))
2348 return reversed;
2350 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2351 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2352 && GET_CODE (op1) == MULT
2353 && GET_CODE (XEXP (op1, 0)) == NEG)
2355 rtx in1, in2;
2357 in1 = XEXP (XEXP (op1, 0), 0);
2358 in2 = XEXP (op1, 1);
2359 return simplify_gen_binary (PLUS, mode,
2360 simplify_gen_binary (MULT, mode,
2361 in1, in2),
2362 op0);
2365 /* Canonicalize (minus (neg A) (mult B C)) to
2366 (minus (mult (neg B) C) A). */
2367 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2368 && GET_CODE (op1) == MULT
2369 && GET_CODE (op0) == NEG)
2371 rtx in1, in2;
2373 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2374 in2 = XEXP (op1, 1);
2375 return simplify_gen_binary (MINUS, mode,
2376 simplify_gen_binary (MULT, mode,
2377 in1, in2),
2378 XEXP (op0, 0));
2381 /* If one of the operands is a PLUS or a MINUS, see if we can
2382 simplify this by the associative law. This will, for example,
2383 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2384 Don't use the associative law for floating point.
2385 The inaccuracy makes it nonassociative,
2386 and subtle programs can break if operations are associated. */
2388 if (INTEGRAL_MODE_P (mode)
2389 && (plus_minus_operand_p (op0)
2390 || plus_minus_operand_p (op1))
2391 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2392 return tem;
2393 break;
2395 case MULT:
2396 if (trueop1 == constm1_rtx)
2397 return simplify_gen_unary (NEG, mode, op0, mode);
2399 if (GET_CODE (op0) == NEG)
2401 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2402 /* If op1 is a MULT as well and simplify_unary_operation
2403 just moved the NEG to the second operand, simplify_gen_binary
2404 below could, through simplify_associative_operation, move
2405 the NEG around again and recurse endlessly. */
2406 if (temp
2407 && GET_CODE (op1) == MULT
2408 && GET_CODE (temp) == MULT
2409 && XEXP (op1, 0) == XEXP (temp, 0)
2410 && GET_CODE (XEXP (temp, 1)) == NEG
2411 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2412 temp = NULL_RTX;
2413 if (temp)
2414 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2416 if (GET_CODE (op1) == NEG)
2418 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2419 /* If op0 is a MULT as well and simplify_unary_operation
2420 just moved the NEG to the second operand, simplify_gen_binary
2421 below could, through simplify_associative_operation, move
2422 the NEG around again and recurse endlessly. */
2423 if (temp
2424 && GET_CODE (op0) == MULT
2425 && GET_CODE (temp) == MULT
2426 && XEXP (op0, 0) == XEXP (temp, 0)
2427 && GET_CODE (XEXP (temp, 1)) == NEG
2428 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2429 temp = NULL_RTX;
2430 if (temp)
2431 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2434 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2435 x is NaN, since x * 0 is then also NaN. Nor is it valid
2436 when the mode has signed zeros, since multiplying a negative
2437 number by 0 will give -0, not 0. */
2438 if (!HONOR_NANS (mode)
2439 && !HONOR_SIGNED_ZEROS (mode)
2440 && trueop1 == CONST0_RTX (mode)
2441 && ! side_effects_p (op0))
2442 return op1;
2444 /* In IEEE floating point, x*1 is not equivalent to x for
2445 signalling NaNs. */
2446 if (!HONOR_SNANS (mode)
2447 && trueop1 == CONST1_RTX (mode))
2448 return op0;
2450 /* Convert multiply by constant power of two into shift. */
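/* E.g. (illustrative): (mult:SI x (const_int 8)) simplifies to
   (ashift:SI x (const_int 3)).  */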
2451 if (CONST_SCALAR_INT_P (trueop1))
2453 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2454 if (val >= 0)
2455 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2458 /* x*2 is x+x and x*(-1) is -x */
2459 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2460 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2461 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2462 && GET_MODE (op0) == mode)
2464 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2466 if (real_equal (d1, &dconst2))
2467 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2469 if (!HONOR_SNANS (mode)
2470 && real_equal (d1, &dconstm1))
2471 return simplify_gen_unary (NEG, mode, op0, mode);
2474 /* Optimize -x * -x as x * x. */
2475 if (FLOAT_MODE_P (mode)
2476 && GET_CODE (op0) == NEG
2477 && GET_CODE (op1) == NEG
2478 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2479 && !side_effects_p (XEXP (op0, 0)))
2480 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2482 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2483 if (SCALAR_FLOAT_MODE_P (mode)
2484 && GET_CODE (op0) == ABS
2485 && GET_CODE (op1) == ABS
2486 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2487 && !side_effects_p (XEXP (op0, 0)))
2488 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2490 /* Reassociate multiplication, but for floating point MULTs
2491 only when the user specifies unsafe math optimizations. */
2492 if (! FLOAT_MODE_P (mode)
2493 || flag_unsafe_math_optimizations)
2495 tem = simplify_associative_operation (code, mode, op0, op1);
2496 if (tem)
2497 return tem;
2499 break;
2501 case IOR:
2502 if (trueop1 == CONST0_RTX (mode))
2503 return op0;
2504 if (INTEGRAL_MODE_P (mode)
2505 && trueop1 == CONSTM1_RTX (mode)
2506 && !side_effects_p (op0))
2507 return op1;
2508 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2509 return op0;
2510 /* A | (~A) -> -1 */
2511 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2512 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2513 && ! side_effects_p (op0)
2514 && SCALAR_INT_MODE_P (mode))
2515 return constm1_rtx;
2517 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2518 if (CONST_INT_P (op1)
2519 && HWI_COMPUTABLE_MODE_P (mode)
2520 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2521 && !side_effects_p (op0))
2522 return op1;
2524 /* Canonicalize (X & C1) | C2. */
2525 if (GET_CODE (op0) == AND
2526 && CONST_INT_P (trueop1)
2527 && CONST_INT_P (XEXP (op0, 1)))
2529 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2530 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2531 HOST_WIDE_INT c2 = INTVAL (trueop1);
2533 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2534 if ((c1 & c2) == c1
2535 && !side_effects_p (XEXP (op0, 0)))
2536 return trueop1;
2538 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2539 if (((c1|c2) & mask) == mask)
2540 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2542 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2543 if (((c1 & ~c2) & mask) != (c1 & mask))
2545 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2546 gen_int_mode (c1 & ~c2, mode));
2547 return simplify_gen_binary (IOR, mode, tem, op1);
2551 /* Convert (A & B) | A to A. */
2552 if (GET_CODE (op0) == AND
2553 && (rtx_equal_p (XEXP (op0, 0), op1)
2554 || rtx_equal_p (XEXP (op0, 1), op1))
2555 && ! side_effects_p (XEXP (op0, 0))
2556 && ! side_effects_p (XEXP (op0, 1)))
2557 return op1;
2559 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2560 mode size to (rotate A CX). */
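/* Illustrative example in 32-bit SImode: (ior:SI (ashift:SI x
   (const_int 24)) (lshiftrt:SI x (const_int 8))) can become
   (rotate:SI x (const_int 24)), since 24 + 8 equals the mode
   precision.  */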
2562 if (GET_CODE (op1) == ASHIFT
2563 || GET_CODE (op1) == SUBREG)
2565 opleft = op1;
2566 opright = op0;
2568 else
2570 opright = op1;
2571 opleft = op0;
2574 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2575 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2576 && CONST_INT_P (XEXP (opleft, 1))
2577 && CONST_INT_P (XEXP (opright, 1))
2578 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2579 == GET_MODE_PRECISION (mode)))
2580 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2582 /* Same, but for ashift that has been "simplified" to a wider mode
2583 by simplify_shift_const. */
2585 if (GET_CODE (opleft) == SUBREG
2586 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2587 && GET_CODE (opright) == LSHIFTRT
2588 && GET_CODE (XEXP (opright, 0)) == SUBREG
2589 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2590 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2591 && (GET_MODE_SIZE (GET_MODE (opleft))
2592 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2593 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2594 SUBREG_REG (XEXP (opright, 0)))
2595 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2596 && CONST_INT_P (XEXP (opright, 1))
2597 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2598 == GET_MODE_PRECISION (mode)))
2599 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2600 XEXP (SUBREG_REG (opleft), 1));
2602 /* If we have (ior (and X C1) C2), simplify this by making
2603 C1 as small as possible if C1 actually changes. */
2604 if (CONST_INT_P (op1)
2605 && (HWI_COMPUTABLE_MODE_P (mode)
2606 || INTVAL (op1) > 0)
2607 && GET_CODE (op0) == AND
2608 && CONST_INT_P (XEXP (op0, 1))
2609 && CONST_INT_P (op1)
2610 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2612 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2613 gen_int_mode (UINTVAL (XEXP (op0, 1))
2614 & ~UINTVAL (op1),
2615 mode));
2616 return simplify_gen_binary (IOR, mode, tmp, op1);
2619 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2620 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2621 the PLUS does not affect any of the bits in OP1: then we can do
2622 the IOR as a PLUS and we can associate. This is valid if OP1
2623 can be safely shifted left C bits. */
2624 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2625 && GET_CODE (XEXP (op0, 0)) == PLUS
2626 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2627 && CONST_INT_P (XEXP (op0, 1))
2628 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2630 int count = INTVAL (XEXP (op0, 1));
2631 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2633 if (mask >> count == INTVAL (trueop1)
2634 && trunc_int_for_mode (mask, mode) == mask
2635 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2636 return simplify_gen_binary (ASHIFTRT, mode,
2637 plus_constant (mode, XEXP (op0, 0),
2638 mask),
2639 XEXP (op0, 1));
2642 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2643 if (tem)
2644 return tem;
2646 tem = simplify_associative_operation (code, mode, op0, op1);
2647 if (tem)
2648 return tem;
2649 break;
2651 case XOR:
2652 if (trueop1 == CONST0_RTX (mode))
2653 return op0;
2654 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2655 return simplify_gen_unary (NOT, mode, op0, mode);
2656 if (rtx_equal_p (trueop0, trueop1)
2657 && ! side_effects_p (op0)
2658 && GET_MODE_CLASS (mode) != MODE_CC)
2659 return CONST0_RTX (mode);
2661 /* Canonicalize XOR of the most significant bit to PLUS. */
2662 if (CONST_SCALAR_INT_P (op1)
2663 && mode_signbit_p (mode, op1))
2664 return simplify_gen_binary (PLUS, mode, op0, op1);
2665 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2666 if (CONST_SCALAR_INT_P (op1)
2667 && GET_CODE (op0) == PLUS
2668 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2669 && mode_signbit_p (mode, XEXP (op0, 1)))
2670 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2671 simplify_gen_binary (XOR, mode, op1,
2672 XEXP (op0, 1)));
2674 /* If we are XORing two things that have no bits in common,
2675 convert them into an IOR. This helps to detect rotation encoded
2676 using those methods and possibly other simplifications. */
2678 if (HWI_COMPUTABLE_MODE_P (mode)
2679 && (nonzero_bits (op0, mode)
2680 & nonzero_bits (op1, mode)) == 0)
2681 return (simplify_gen_binary (IOR, mode, op0, op1));
2683 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2684 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2685 (NOT y). */
2687 int num_negated = 0;
2689 if (GET_CODE (op0) == NOT)
2690 num_negated++, op0 = XEXP (op0, 0);
2691 if (GET_CODE (op1) == NOT)
2692 num_negated++, op1 = XEXP (op1, 0);
2694 if (num_negated == 2)
2695 return simplify_gen_binary (XOR, mode, op0, op1);
2696 else if (num_negated == 1)
2697 return simplify_gen_unary (NOT, mode,
2698 simplify_gen_binary (XOR, mode, op0, op1),
2699 mode);
2702 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2703 correspond to a machine insn or result in further simplifications
2704 if B is a constant. */
2706 if (GET_CODE (op0) == AND
2707 && rtx_equal_p (XEXP (op0, 1), op1)
2708 && ! side_effects_p (op1))
2709 return simplify_gen_binary (AND, mode,
2710 simplify_gen_unary (NOT, mode,
2711 XEXP (op0, 0), mode),
2712 op1);
2714 else if (GET_CODE (op0) == AND
2715 && rtx_equal_p (XEXP (op0, 0), op1)
2716 && ! side_effects_p (op1))
2717 return simplify_gen_binary (AND, mode,
2718 simplify_gen_unary (NOT, mode,
2719 XEXP (op0, 1), mode),
2720 op1);
2722 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2723 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2724 out bits inverted twice and not set by C. Similarly, given
2725 (xor (and (xor A B) C) D), simplify without inverting C in
2726 the xor operand: (xor (and A C) (B&C)^D).
2728 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2729 && GET_CODE (XEXP (op0, 0)) == XOR
2730 && CONST_INT_P (op1)
2731 && CONST_INT_P (XEXP (op0, 1))
2732 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2734 enum rtx_code op = GET_CODE (op0);
2735 rtx a = XEXP (XEXP (op0, 0), 0);
2736 rtx b = XEXP (XEXP (op0, 0), 1);
2737 rtx c = XEXP (op0, 1);
2738 rtx d = op1;
2739 HOST_WIDE_INT bval = INTVAL (b);
2740 HOST_WIDE_INT cval = INTVAL (c);
2741 HOST_WIDE_INT dval = INTVAL (d);
2742 HOST_WIDE_INT xcval;
2744 if (op == IOR)
2745 xcval = ~cval;
2746 else
2747 xcval = cval;
2749 return simplify_gen_binary (XOR, mode,
2750 simplify_gen_binary (op, mode, a, c),
2751 gen_int_mode ((bval & xcval) ^ dval,
2752 mode));
2755 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2756 we can transform like this:
2757 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2758 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2759 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2760 Attempt a few simplifications when B and C are both constants. */
2761 if (GET_CODE (op0) == AND
2762 && CONST_INT_P (op1)
2763 && CONST_INT_P (XEXP (op0, 1)))
2765 rtx a = XEXP (op0, 0);
2766 rtx b = XEXP (op0, 1);
2767 rtx c = op1;
2768 HOST_WIDE_INT bval = INTVAL (b);
2769 HOST_WIDE_INT cval = INTVAL (c);
2771 /* Instead of computing ~A&C, we compute its negated value,
2772 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2773 optimize for sure. If it does not simplify, we still try
2774 to compute ~A&C below, but since that always allocates
2775 RTL, we don't try that before committing to returning a
2776 simplified expression. */
2777 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2778 GEN_INT (~cval));
2780 if ((~cval & bval) == 0)
2782 rtx na_c = NULL_RTX;
2783 if (n_na_c)
2784 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2785 else
2787 /* If ~A does not simplify, don't bother: we don't
2788 want to simplify 2 operations into 3, and if na_c
2789 were to simplify with na, n_na_c would have
2790 simplified as well. */
2791 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2792 if (na)
2793 na_c = simplify_gen_binary (AND, mode, na, c);
2796 /* Try to simplify ~A&C | ~B&C. */
2797 if (na_c != NULL_RTX)
2798 return simplify_gen_binary (IOR, mode, na_c,
2799 gen_int_mode (~bval & cval, mode));
2801 else
2803 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2804 if (n_na_c == CONSTM1_RTX (mode))
2806 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2807 gen_int_mode (~cval & bval,
2808 mode));
2809 return simplify_gen_binary (IOR, mode, a_nc_b,
2810 gen_int_mode (~bval & cval,
2811 mode));
2816 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2817 comparison if STORE_FLAG_VALUE is 1. */
2818 if (STORE_FLAG_VALUE == 1
2819 && trueop1 == const1_rtx
2820 && COMPARISON_P (op0)
2821 && (reversed = reversed_comparison (op0, mode)))
2822 return reversed;
2824 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2825 is (lt foo (const_int 0)), so we can perform the above
2826 simplification if STORE_FLAG_VALUE is 1. */
2828 if (STORE_FLAG_VALUE == 1
2829 && trueop1 == const1_rtx
2830 && GET_CODE (op0) == LSHIFTRT
2831 && CONST_INT_P (XEXP (op0, 1))
2832 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2833 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2835 /* (xor (comparison foo bar) (const_int sign-bit))
2836 when STORE_FLAG_VALUE is the sign bit. */
2837 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2838 && trueop1 == const_true_rtx
2839 && COMPARISON_P (op0)
2840 && (reversed = reversed_comparison (op0, mode)))
2841 return reversed;
2843 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2844 if (tem)
2845 return tem;
2847 tem = simplify_associative_operation (code, mode, op0, op1);
2848 if (tem)
2849 return tem;
2850 break;
2852 case AND:
2853 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2854 return trueop1;
2855 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2856 return op0;
2857 if (HWI_COMPUTABLE_MODE_P (mode))
2859 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2860 HOST_WIDE_INT nzop1;
2861 if (CONST_INT_P (trueop1))
2863 HOST_WIDE_INT val1 = INTVAL (trueop1);
2864 /* If we are turning off bits already known off in OP0, we need
2865 not do an AND. */
2866 if ((nzop0 & ~val1) == 0)
2867 return op0;
2869 nzop1 = nonzero_bits (trueop1, mode);
2870 /* If we are clearing all the nonzero bits, the result is zero. */
2871 if ((nzop1 & nzop0) == 0
2872 && !side_effects_p (op0) && !side_effects_p (op1))
2873 return CONST0_RTX (mode);
2875 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2876 && GET_MODE_CLASS (mode) != MODE_CC)
2877 return op0;
2878 /* A & (~A) -> 0 */
2879 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2880 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2881 && ! side_effects_p (op0)
2882 && GET_MODE_CLASS (mode) != MODE_CC)
2883 return CONST0_RTX (mode);
2885 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2886 there are no nonzero bits of C outside of X's mode. */
2887 if ((GET_CODE (op0) == SIGN_EXTEND
2888 || GET_CODE (op0) == ZERO_EXTEND)
2889 && CONST_INT_P (trueop1)
2890 && HWI_COMPUTABLE_MODE_P (mode)
2891 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2892 & UINTVAL (trueop1)) == 0)
2894 machine_mode imode = GET_MODE (XEXP (op0, 0));
2895 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2896 gen_int_mode (INTVAL (trueop1),
2897 imode));
2898 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2901 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2902 we might be able to further simplify the AND with X and potentially
2903 remove the truncation altogether. */
2904 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2906 rtx x = XEXP (op0, 0);
2907 machine_mode xmode = GET_MODE (x);
2908 tem = simplify_gen_binary (AND, xmode, x,
2909 gen_int_mode (INTVAL (trueop1), xmode));
2910 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2913 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
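/* Illustrative example: (and:SI (ior:SI x (const_int 0x0f))
   (const_int 0xff)) becomes (ior:SI (and:SI x (const_int 0xff))
   (const_int 0x0f)); the pieces may then simplify further.  */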
2914 if (GET_CODE (op0) == IOR
2915 && CONST_INT_P (trueop1)
2916 && CONST_INT_P (XEXP (op0, 1)))
2918 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2919 return simplify_gen_binary (IOR, mode,
2920 simplify_gen_binary (AND, mode,
2921 XEXP (op0, 0), op1),
2922 gen_int_mode (tmp, mode));
2925 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2926 insn (and may simplify more). */
2927 if (GET_CODE (op0) == XOR
2928 && rtx_equal_p (XEXP (op0, 0), op1)
2929 && ! side_effects_p (op1))
2930 return simplify_gen_binary (AND, mode,
2931 simplify_gen_unary (NOT, mode,
2932 XEXP (op0, 1), mode),
2933 op1);
2935 if (GET_CODE (op0) == XOR
2936 && rtx_equal_p (XEXP (op0, 1), op1)
2937 && ! side_effects_p (op1))
2938 return simplify_gen_binary (AND, mode,
2939 simplify_gen_unary (NOT, mode,
2940 XEXP (op0, 0), mode),
2941 op1);
2943 /* Similarly for (~(A ^ B)) & A. */
2944 if (GET_CODE (op0) == NOT
2945 && GET_CODE (XEXP (op0, 0)) == XOR
2946 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2947 && ! side_effects_p (op1))
2948 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2950 if (GET_CODE (op0) == NOT
2951 && GET_CODE (XEXP (op0, 0)) == XOR
2952 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2953 && ! side_effects_p (op1))
2954 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2956 /* Convert (A | B) & A to A. */
2957 if (GET_CODE (op0) == IOR
2958 && (rtx_equal_p (XEXP (op0, 0), op1)
2959 || rtx_equal_p (XEXP (op0, 1), op1))
2960 && ! side_effects_p (XEXP (op0, 0))
2961 && ! side_effects_p (XEXP (op0, 1)))
2962 return op1;
2964 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2965 ((A & N) + B) & M -> (A + B) & M
2966 Similarly if (N & M) == 0,
2967 ((A | N) + B) & M -> (A + B) & M
2968 and for - instead of + and/or ^ instead of |.
2969 Also, if (N & M) == 0, then
2970 (A +- N) & M -> A & M. */
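/* Illustrative example with M == 0xff and N == 0x1ff:
   (and:SI (plus:SI (and:SI a (const_int 0x1ff)) b) (const_int 0xff))
   can become (and:SI (plus:SI a b) (const_int 0xff)).  */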
2971 if (CONST_INT_P (trueop1)
2972 && HWI_COMPUTABLE_MODE_P (mode)
2973 && ~UINTVAL (trueop1)
2974 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2975 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2977 rtx pmop[2];
2978 int which;
2980 pmop[0] = XEXP (op0, 0);
2981 pmop[1] = XEXP (op0, 1);
2983 if (CONST_INT_P (pmop[1])
2984 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2985 return simplify_gen_binary (AND, mode, pmop[0], op1);
2987 for (which = 0; which < 2; which++)
2989 tem = pmop[which];
2990 switch (GET_CODE (tem))
2992 case AND:
2993 if (CONST_INT_P (XEXP (tem, 1))
2994 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2995 == UINTVAL (trueop1))
2996 pmop[which] = XEXP (tem, 0);
2997 break;
2998 case IOR:
2999 case XOR:
3000 if (CONST_INT_P (XEXP (tem, 1))
3001 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3002 pmop[which] = XEXP (tem, 0);
3003 break;
3004 default:
3005 break;
3009 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3011 tem = simplify_gen_binary (GET_CODE (op0), mode,
3012 pmop[0], pmop[1]);
3013 return simplify_gen_binary (code, mode, tem, op1);
3017 /* (and X (ior (not X) Y)) -> (and X Y) */
3018 if (GET_CODE (op1) == IOR
3019 && GET_CODE (XEXP (op1, 0)) == NOT
3020 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3021 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3023 /* (and (ior (not X) Y) X) -> (and X Y) */
3024 if (GET_CODE (op0) == IOR
3025 && GET_CODE (XEXP (op0, 0)) == NOT
3026 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3027 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3029 /* (and X (ior Y (not X))) -> (and X Y) */
3030 if (GET_CODE (op1) == IOR
3031 && GET_CODE (XEXP (op1, 1)) == NOT
3032 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3033 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3035 /* (and (ior Y (not X)) X) -> (and X Y) */
3036 if (GET_CODE (op0) == IOR
3037 && GET_CODE (XEXP (op0, 1)) == NOT
3038 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3039 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3041 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3042 if (tem)
3043 return tem;
3045 tem = simplify_associative_operation (code, mode, op0, op1);
3046 if (tem)
3047 return tem;
3048 break;
3050 case UDIV:
3051 /* 0/x is 0 (or x&0 if x has side-effects). */
3052 if (trueop0 == CONST0_RTX (mode))
3054 if (side_effects_p (op1))
3055 return simplify_gen_binary (AND, mode, op1, trueop0);
3056 return trueop0;
3058 /* x/1 is x. */
3059 if (trueop1 == CONST1_RTX (mode))
3061 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3062 if (tem)
3063 return tem;
3065 /* Convert divide by power of two into shift. */
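/* E.g. (illustrative): (udiv:SI x (const_int 16)) simplifies to
   (lshiftrt:SI x (const_int 4)).  */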
3066 if (CONST_INT_P (trueop1)
3067 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3068 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3069 break;
3071 case DIV:
3072 /* Handle floating point and integers separately. */
3073 if (SCALAR_FLOAT_MODE_P (mode))
3075 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3076 safe for modes with NaNs, since 0.0 / 0.0 will then be
3077 NaN rather than 0.0. Nor is it safe for modes with signed
3078 zeros, since dividing 0 by a negative number gives -0.0. */
3079 if (trueop0 == CONST0_RTX (mode)
3080 && !HONOR_NANS (mode)
3081 && !HONOR_SIGNED_ZEROS (mode)
3082 && ! side_effects_p (op1))
3083 return op0;
3084 /* x/1.0 is x. */
3085 if (trueop1 == CONST1_RTX (mode)
3086 && !HONOR_SNANS (mode))
3087 return op0;
3089 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3090 && trueop1 != CONST0_RTX (mode))
3092 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3094 /* x/-1.0 is -x. */
3095 if (real_equal (d1, &dconstm1)
3096 && !HONOR_SNANS (mode))
3097 return simplify_gen_unary (NEG, mode, op0, mode);
3099 /* Change FP division by a constant into multiplication.
3100 Only do this with -freciprocal-math. */
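/* Illustrative example under -freciprocal-math: (div:SF x
   (const_double 4.0)) becomes (mult:SF x (const_double 0.25)),
   since 1/4.0 is exactly representable.  */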
3101 if (flag_reciprocal_math
3102 && !real_equal (d1, &dconst0))
3104 REAL_VALUE_TYPE d;
3105 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3106 tem = const_double_from_real_value (d, mode);
3107 return simplify_gen_binary (MULT, mode, op0, tem);
3111 else if (SCALAR_INT_MODE_P (mode))
3113 /* 0/x is 0 (or x&0 if x has side-effects). */
3114 if (trueop0 == CONST0_RTX (mode)
3115 && !cfun->can_throw_non_call_exceptions)
3117 if (side_effects_p (op1))
3118 return simplify_gen_binary (AND, mode, op1, trueop0);
3119 return trueop0;
3121 /* x/1 is x. */
3122 if (trueop1 == CONST1_RTX (mode))
3124 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3125 if (tem)
3126 return tem;
3128 /* x/-1 is -x. */
3129 if (trueop1 == constm1_rtx)
3131 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3132 if (x)
3133 return simplify_gen_unary (NEG, mode, x, mode);
3136 break;
3138 case UMOD:
3139 /* 0%x is 0 (or x&0 if x has side-effects). */
3140 if (trueop0 == CONST0_RTX (mode))
3142 if (side_effects_p (op1))
3143 return simplify_gen_binary (AND, mode, op1, trueop0);
3144 return trueop0;
3146 /* x%1 is 0 (or x&0 if x has side-effects). */
3147 if (trueop1 == CONST1_RTX (mode))
3149 if (side_effects_p (op0))
3150 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3151 return CONST0_RTX (mode);
3153 /* Implement modulus by power of two as AND. */
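/* E.g. (illustrative): (umod:SI x (const_int 32)) simplifies to
   (and:SI x (const_int 31)).  */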
3154 if (CONST_INT_P (trueop1)
3155 && exact_log2 (UINTVAL (trueop1)) > 0)
3156 return simplify_gen_binary (AND, mode, op0,
3157 gen_int_mode (INTVAL (op1) - 1, mode));
3158 break;
3160 case MOD:
3161 /* 0%x is 0 (or x&0 if x has side-effects). */
3162 if (trueop0 == CONST0_RTX (mode))
3164 if (side_effects_p (op1))
3165 return simplify_gen_binary (AND, mode, op1, trueop0);
3166 return trueop0;
3168 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3169 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3171 if (side_effects_p (op0))
3172 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3173 return CONST0_RTX (mode);
3175 break;
3177 case ROTATERT:
3178 case ROTATE:
3179 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3180 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3181 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3182 amount instead. */
3183 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3184 if (CONST_INT_P (trueop1)
3185 && IN_RANGE (INTVAL (trueop1),
3186 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3187 GET_MODE_PRECISION (mode) - 1))
3188 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3189 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3190 - INTVAL (trueop1)));
3191 #endif
3192 /* FALLTHRU */
3193 case ASHIFTRT:
3194 if (trueop1 == CONST0_RTX (mode))
3195 return op0;
3196 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3197 return op0;
3198 /* Rotating ~0 always results in ~0. */
3199 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3200 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3201 && ! side_effects_p (op1))
3202 return op0;
3203 /* Given:
3204 scalar modes M1, M2
3205 scalar constants c1, c2
3206 size (M2) > size (M1)
3207 c1 == size (M2) - size (M1)
3208 optimize:
3209 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3210 <low_part>)
3211 (const_int <c2>))
3213 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3214 <low_part>). */
3215 if (code == ASHIFTRT
3216 && !VECTOR_MODE_P (mode)
3217 && SUBREG_P (op0)
3218 && CONST_INT_P (op1)
3219 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3220 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3221 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3222 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3223 > GET_MODE_BITSIZE (mode))
3224 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3225 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3226 - GET_MODE_BITSIZE (mode)))
3227 && subreg_lowpart_p (op0))
3229 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3230 + INTVAL (op1));
3231 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3232 tmp = simplify_gen_binary (ASHIFTRT,
3233 GET_MODE (SUBREG_REG (op0)),
3234 XEXP (SUBREG_REG (op0), 0),
3235 tmp);
3236 return lowpart_subreg (mode, tmp, inner_mode);
3238 canonicalize_shift:
3239 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3241 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3242 if (val != INTVAL (op1))
3243 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3245 break;
3247 case ASHIFT:
3248 case SS_ASHIFT:
3249 case US_ASHIFT:
3250 if (trueop1 == CONST0_RTX (mode))
3251 return op0;
3252 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3253 return op0;
3254 goto canonicalize_shift;
3256 case LSHIFTRT:
3257 if (trueop1 == CONST0_RTX (mode))
3258 return op0;
3259 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3260 return op0;
3261 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
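/* Illustrative example, assuming a target where CLZ of zero is defined
   as the mode precision (32 for SImode) and STORE_FLAG_VALUE is 1:
   (lshiftrt:SI (clz:SI x) (const_int 5)) can become
   (eq:SI x (const_int 0)).  */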
3262 if (GET_CODE (op0) == CLZ
3263 && CONST_INT_P (trueop1)
3264 && STORE_FLAG_VALUE == 1
3265 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3267 machine_mode imode = GET_MODE (XEXP (op0, 0));
3268 unsigned HOST_WIDE_INT zero_val = 0;
3270 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3271 && zero_val == GET_MODE_PRECISION (imode)
3272 && INTVAL (trueop1) == exact_log2 (zero_val))
3273 return simplify_gen_relational (EQ, mode, imode,
3274 XEXP (op0, 0), const0_rtx);
3276 goto canonicalize_shift;
3278 case SMIN:
3279 if (width <= HOST_BITS_PER_WIDE_INT
3280 && mode_signbit_p (mode, trueop1)
3281 && ! side_effects_p (op0))
3282 return op1;
3283 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3284 return op0;
3285 tem = simplify_associative_operation (code, mode, op0, op1);
3286 if (tem)
3287 return tem;
3288 break;
3290 case SMAX:
3291 if (width <= HOST_BITS_PER_WIDE_INT
3292 && CONST_INT_P (trueop1)
3293 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3294 && ! side_effects_p (op0))
3295 return op1;
3296 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3297 return op0;
3298 tem = simplify_associative_operation (code, mode, op0, op1);
3299 if (tem)
3300 return tem;
3301 break;
3303 case UMIN:
3304 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3305 return op1;
3306 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3307 return op0;
3308 tem = simplify_associative_operation (code, mode, op0, op1);
3309 if (tem)
3310 return tem;
3311 break;
3313 case UMAX:
3314 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3315 return op1;
3316 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3317 return op0;
3318 tem = simplify_associative_operation (code, mode, op0, op1);
3319 if (tem)
3320 return tem;
3321 break;
3323 case SS_PLUS:
3324 case US_PLUS:
3325 case SS_MINUS:
3326 case US_MINUS:
3327 case SS_MULT:
3328 case US_MULT:
3329 case SS_DIV:
3330 case US_DIV:
3331 /* ??? There are simplifications that can be done. */
3332 return 0;
3334 case VEC_SELECT:
3335 if (!VECTOR_MODE_P (mode))
3337 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3338 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3339 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3340 gcc_assert (XVECLEN (trueop1, 0) == 1);
3341 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3343 if (GET_CODE (trueop0) == CONST_VECTOR)
3344 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3345 (trueop1, 0, 0)));
3347 /* Extract a scalar element from a nested VEC_SELECT expression
3348 (with optional nested VEC_CONCAT expression). Some targets
3349 (i386) extract scalar element from a vector using chain of
3350 nested VEC_SELECT expressions. When the input operand is a memory
3351 operand, this operation can be simplified to a simple scalar
3352 load from a suitably offset memory address. */
3353 if (GET_CODE (trueop0) == VEC_SELECT)
3355 rtx op0 = XEXP (trueop0, 0);
3356 rtx op1 = XEXP (trueop0, 1);
3358 machine_mode opmode = GET_MODE (op0);
3359 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3360 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3362 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3363 int elem;
3365 rtvec vec;
3366 rtx tmp_op, tmp;
3368 gcc_assert (GET_CODE (op1) == PARALLEL);
3369 gcc_assert (i < n_elts);
3371 /* Select the element pointed to by the nested selector. */
3372 elem = INTVAL (XVECEXP (op1, 0, i));
3374 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3375 if (GET_CODE (op0) == VEC_CONCAT)
3377 rtx op00 = XEXP (op0, 0);
3378 rtx op01 = XEXP (op0, 1);
3380 machine_mode mode00, mode01;
3381 int n_elts00, n_elts01;
3383 mode00 = GET_MODE (op00);
3384 mode01 = GET_MODE (op01);
3386 /* Find the number of elements in each operand. */
3387 if (VECTOR_MODE_P (mode00))
3389 elt_size = GET_MODE_UNIT_SIZE (mode00);
3390 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3392 else
3393 n_elts00 = 1;
3395 if (VECTOR_MODE_P (mode01))
3397 elt_size = GET_MODE_UNIT_SIZE (mode01);
3398 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3400 else
3401 n_elts01 = 1;
3403 gcc_assert (n_elts == n_elts00 + n_elts01);
3405 /* Select correct operand of VEC_CONCAT
3406 and adjust selector. */
3407 if (elem < n_elts01)
3408 tmp_op = op00;
3409 else
3411 tmp_op = op01;
3412 elem -= n_elts00;
3415 else
3416 tmp_op = op0;
3418 vec = rtvec_alloc (1);
3419 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3421 tmp = gen_rtx_fmt_ee (code, mode,
3422 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3423 return tmp;
3425 if (GET_CODE (trueop0) == VEC_DUPLICATE
3426 && GET_MODE (XEXP (trueop0, 0)) == mode)
3427 return XEXP (trueop0, 0);
3429 else
3431 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3432 gcc_assert (GET_MODE_INNER (mode)
3433 == GET_MODE_INNER (GET_MODE (trueop0)));
3434 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3436 if (GET_CODE (trueop0) == CONST_VECTOR)
3438 int elt_size = GET_MODE_UNIT_SIZE (mode);
3439 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3440 rtvec v = rtvec_alloc (n_elts);
3441 unsigned int i;
3443 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3444 for (i = 0; i < n_elts; i++)
3446 rtx x = XVECEXP (trueop1, 0, i);
3448 gcc_assert (CONST_INT_P (x));
3449 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3450 INTVAL (x));
3453 return gen_rtx_CONST_VECTOR (mode, v);
3456 /* Recognize the identity. */
3457 if (GET_MODE (trueop0) == mode)
3459 bool maybe_ident = true;
3460 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3462 rtx j = XVECEXP (trueop1, 0, i);
3463 if (!CONST_INT_P (j) || INTVAL (j) != i)
3465 maybe_ident = false;
3466 break;
3469 if (maybe_ident)
3470 return trueop0;
3473 /* If we build {a,b} then permute it, build the result directly. */
3474 if (XVECLEN (trueop1, 0) == 2
3475 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3476 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3477 && GET_CODE (trueop0) == VEC_CONCAT
3478 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3479 && GET_MODE (XEXP (trueop0, 0)) == mode
3480 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3481 && GET_MODE (XEXP (trueop0, 1)) == mode)
3483 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3484 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3485 rtx subop0, subop1;
3487 gcc_assert (i0 < 4 && i1 < 4);
3488 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3489 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3491 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3494 if (XVECLEN (trueop1, 0) == 2
3495 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3496 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3497 && GET_CODE (trueop0) == VEC_CONCAT
3498 && GET_MODE (trueop0) == mode)
3500 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3501 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3502 rtx subop0, subop1;
3504 gcc_assert (i0 < 2 && i1 < 2);
3505 subop0 = XEXP (trueop0, i0);
3506 subop1 = XEXP (trueop0, i1);
3508 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3511 /* If we select one half of a vec_concat, return that. */
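/* Illustrative example: (vec_select:V2SI (vec_concat:V4SI (reg:V2SI a)
   (reg:V2SI b)) (parallel [(const_int 0) (const_int 1)])) simplifies to
   (reg:V2SI a); selecting indices 2 and 3 instead yields
   (reg:V2SI b).  */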
3512 if (GET_CODE (trueop0) == VEC_CONCAT
3513 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3515 rtx subop0 = XEXP (trueop0, 0);
3516 rtx subop1 = XEXP (trueop0, 1);
3517 machine_mode mode0 = GET_MODE (subop0);
3518 machine_mode mode1 = GET_MODE (subop1);
3519 int li = GET_MODE_UNIT_SIZE (mode0);
3520 int l0 = GET_MODE_SIZE (mode0) / li;
3521 int l1 = GET_MODE_SIZE (mode1) / li;
3522 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3523 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3525 bool success = true;
3526 for (int i = 1; i < l0; ++i)
3528 rtx j = XVECEXP (trueop1, 0, i);
3529 if (!CONST_INT_P (j) || INTVAL (j) != i)
3531 success = false;
3532 break;
3535 if (success)
3536 return subop0;
3538 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3540 bool success = true;
3541 for (int i = 1; i < l1; ++i)
3543 rtx j = XVECEXP (trueop1, 0, i);
3544 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3546 success = false;
3547 break;
3550 if (success)
3551 return subop1;
3556 if (XVECLEN (trueop1, 0) == 1
3557 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3558 && GET_CODE (trueop0) == VEC_CONCAT)
3560 rtx vec = trueop0;
3561 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3563 /* Try to find the element in the VEC_CONCAT. */
3564 while (GET_MODE (vec) != mode
3565 && GET_CODE (vec) == VEC_CONCAT)
3567 HOST_WIDE_INT vec_size;
3569 if (CONST_INT_P (XEXP (vec, 0)))
3571 /* vec_concat of two const_ints doesn't make sense with
3572 respect to modes. */
3573 if (CONST_INT_P (XEXP (vec, 1)))
3574 return 0;
3576 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3577 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3579 else
3580 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3582 if (offset < vec_size)
3583 vec = XEXP (vec, 0);
3584 else
3586 offset -= vec_size;
3587 vec = XEXP (vec, 1);
3589 vec = avoid_constant_pool_reference (vec);
3592 if (GET_MODE (vec) == mode)
3593 return vec;
3596 /* If we select elements in a vec_merge that all come from the same
3597 operand, select from that operand directly. */
3598 if (GET_CODE (op0) == VEC_MERGE)
3600 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3601 if (CONST_INT_P (trueop02))
3603 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3604 bool all_operand0 = true;
3605 bool all_operand1 = true;
3606 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3608 rtx j = XVECEXP (trueop1, 0, i);
3609 if (sel & (1 << UINTVAL (j)))
3610 all_operand1 = false;
3611 else
3612 all_operand0 = false;
3614 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3615 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3616 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3617 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3621 /* If we have two nested selects that are inverses of each
3622 other, replace them with the source operand. */
3623 if (GET_CODE (trueop0) == VEC_SELECT
3624 && GET_MODE (XEXP (trueop0, 0)) == mode)
3626 rtx op0_subop1 = XEXP (trueop0, 1);
3627 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3628 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3630 /* Apply the outer ordering vector to the inner one. (The inner
3631 ordering vector is expressly permitted to be of a different
3632 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3633 then the two VEC_SELECTs cancel. */
3634 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3636 rtx x = XVECEXP (trueop1, 0, i);
3637 if (!CONST_INT_P (x))
3638 return 0;
3639 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3640 if (!CONST_INT_P (y) || i != INTVAL (y))
3641 return 0;
3643 return XEXP (trueop0, 0);
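/* For illustration (hypothetical operands): two lane swaps cancel, e.g.
     (vec_select:V2DI (vec_select:V2DI (reg:V2DI x) (parallel [1 0]))
                      (parallel [1 0]))
   composes to the identity permutation { 0, 1 } and returns (reg:V2DI x).  */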
3646 return 0;
3647 case VEC_CONCAT:
3649 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3650 ? GET_MODE (trueop0)
3651 : GET_MODE_INNER (mode));
3652 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3653 ? GET_MODE (trueop1)
3654 : GET_MODE_INNER (mode));
3656 gcc_assert (VECTOR_MODE_P (mode));
3657 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3658 == GET_MODE_SIZE (mode));
3660 if (VECTOR_MODE_P (op0_mode))
3661 gcc_assert (GET_MODE_INNER (mode)
3662 == GET_MODE_INNER (op0_mode));
3663 else
3664 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3666 if (VECTOR_MODE_P (op1_mode))
3667 gcc_assert (GET_MODE_INNER (mode)
3668 == GET_MODE_INNER (op1_mode));
3669 else
3670 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3672 if ((GET_CODE (trueop0) == CONST_VECTOR
3673 || CONST_SCALAR_INT_P (trueop0)
3674 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3675 && (GET_CODE (trueop1) == CONST_VECTOR
3676 || CONST_SCALAR_INT_P (trueop1)
3677 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3679 int elt_size = GET_MODE_UNIT_SIZE (mode);
3680 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3681 rtvec v = rtvec_alloc (n_elts);
3682 unsigned int i;
3683 unsigned in_n_elts = 1;
3685 if (VECTOR_MODE_P (op0_mode))
3686 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3687 for (i = 0; i < n_elts; i++)
3689 if (i < in_n_elts)
3691 if (!VECTOR_MODE_P (op0_mode))
3692 RTVEC_ELT (v, i) = trueop0;
3693 else
3694 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3696 else
3698 if (!VECTOR_MODE_P (op1_mode))
3699 RTVEC_ELT (v, i) = trueop1;
3700 else
3701 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3702 i - in_n_elts);
3706 return gen_rtx_CONST_VECTOR (mode, v);
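/* For illustration (hypothetical constants): the constant path above folds
     (vec_concat:V4SI (const_vector:V2SI [1 2]) (const_vector:V2SI [3 4]))
   into (const_vector:V4SI [1 2 3 4]), and likewise
     (vec_concat:V2SI (const_int 1) (const_int 2))
   into (const_vector:V2SI [1 2]).  */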
3709 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3710 Restrict the transformation to avoid generating a VEC_SELECT with a
3711 mode unrelated to its operand. */
3712 if (GET_CODE (trueop0) == VEC_SELECT
3713 && GET_CODE (trueop1) == VEC_SELECT
3714 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3715 && GET_MODE (XEXP (trueop0, 0)) == mode)
3717 rtx par0 = XEXP (trueop0, 1);
3718 rtx par1 = XEXP (trueop1, 1);
3719 int len0 = XVECLEN (par0, 0);
3720 int len1 = XVECLEN (par1, 0);
3721 rtvec vec = rtvec_alloc (len0 + len1);
3722 for (int i = 0; i < len0; i++)
3723 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3724 for (int i = 0; i < len1; i++)
3725 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3726 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3727 gen_rtx_PARALLEL (VOIDmode, vec));
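/* For illustration (hypothetical operands): two selections from the same
   V4SI source merge into one, e.g.
     (vec_concat:V4SI (vec_select:V2SI (reg:V4SI x) (parallel [0 1]))
                      (vec_select:V2SI (reg:V4SI x) (parallel [3 2])))
   becomes (vec_select:V4SI (reg:V4SI x) (parallel [0 1 3 2])).  */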
3730 return 0;
3732 default:
3733 gcc_unreachable ();
3736 return 0;
3740 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3741 rtx op0, rtx op1)
3743 unsigned int width = GET_MODE_PRECISION (mode);
3745 if (VECTOR_MODE_P (mode)
3746 && code != VEC_CONCAT
3747 && GET_CODE (op0) == CONST_VECTOR
3748 && GET_CODE (op1) == CONST_VECTOR)
3750 unsigned n_elts = GET_MODE_NUNITS (mode);
3751 machine_mode op0mode = GET_MODE (op0);
3752 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3753 machine_mode op1mode = GET_MODE (op1);
3754 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3755 rtvec v = rtvec_alloc (n_elts);
3756 unsigned int i;
3758 gcc_assert (op0_n_elts == n_elts);
3759 gcc_assert (op1_n_elts == n_elts);
3760 for (i = 0; i < n_elts; i++)
3762 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3763 CONST_VECTOR_ELT (op0, i),
3764 CONST_VECTOR_ELT (op1, i));
3765 if (!x)
3766 return 0;
3767 RTVEC_ELT (v, i) = x;
3770 return gen_rtx_CONST_VECTOR (mode, v);
3773 if (VECTOR_MODE_P (mode)
3774 && code == VEC_CONCAT
3775 && (CONST_SCALAR_INT_P (op0)
3776 || GET_CODE (op0) == CONST_FIXED
3777 || CONST_DOUBLE_AS_FLOAT_P (op0))
3778 && (CONST_SCALAR_INT_P (op1)
3779 || CONST_DOUBLE_AS_FLOAT_P (op1)
3780 || GET_CODE (op1) == CONST_FIXED))
3782 unsigned n_elts = GET_MODE_NUNITS (mode);
3783 rtvec v = rtvec_alloc (n_elts);
3785 gcc_assert (n_elts >= 2);
3786 if (n_elts == 2)
3788 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3789 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3791 RTVEC_ELT (v, 0) = op0;
3792 RTVEC_ELT (v, 1) = op1;
3794 else
3796 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3797 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3798 unsigned i;
3800 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3801 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3802 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3804 for (i = 0; i < op0_n_elts; ++i)
3805 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3806 for (i = 0; i < op1_n_elts; ++i)
3807 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3810 return gen_rtx_CONST_VECTOR (mode, v);
3813 if (SCALAR_FLOAT_MODE_P (mode)
3814 && CONST_DOUBLE_AS_FLOAT_P (op0)
3815 && CONST_DOUBLE_AS_FLOAT_P (op1)
3816 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3818 if (code == AND
3819 || code == IOR
3820 || code == XOR)
3822 long tmp0[4];
3823 long tmp1[4];
3824 REAL_VALUE_TYPE r;
3825 int i;
3827 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3828 GET_MODE (op0));
3829 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3830 GET_MODE (op1));
3831 for (i = 0; i < 4; i++)
3833 switch (code)
3835 case AND:
3836 tmp0[i] &= tmp1[i];
3837 break;
3838 case IOR:
3839 tmp0[i] |= tmp1[i];
3840 break;
3841 case XOR:
3842 tmp0[i] ^= tmp1[i];
3843 break;
3844 default:
3845 gcc_unreachable ();
3848 real_from_target (&r, tmp0, mode);
3849 return const_double_from_real_value (r, mode);
3851 else
3853 REAL_VALUE_TYPE f0, f1, value, result;
3854 bool inexact;
3856 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3857 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3859 if (HONOR_SNANS (mode)
3860 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3861 return 0;
3863 if (code == DIV
3864 && real_equal (&f1, &dconst0)
3865 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3866 return 0;
3868 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3869 && flag_trapping_math
3870 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3872 int s0 = REAL_VALUE_NEGATIVE (f0);
3873 int s1 = REAL_VALUE_NEGATIVE (f1);
3875 switch (code)
3877 case PLUS:
3878 /* Inf + -Inf = NaN plus exception. */
3879 if (s0 != s1)
3880 return 0;
3881 break;
3882 case MINUS:
3883 /* Inf - Inf = NaN plus exception. */
3884 if (s0 == s1)
3885 return 0;
3886 break;
3887 case DIV:
3888 /* Inf / Inf = NaN plus exception. */
3889 return 0;
3890 default:
3891 break;
3895 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3896 && flag_trapping_math
3897 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3898 || (REAL_VALUE_ISINF (f1)
3899 && real_equal (&f0, &dconst0))))
3900 /* Inf * 0 = NaN plus exception. */
3901 return 0;
3903 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3904 &f0, &f1);
3905 real_convert (&result, mode, &value);
3907 /* Don't constant fold this floating point operation if
3908 the result has overflowed and flag_trapping_math is set. */
3910 if (flag_trapping_math
3911 && MODE_HAS_INFINITIES (mode)
3912 && REAL_VALUE_ISINF (result)
3913 && !REAL_VALUE_ISINF (f0)
3914 && !REAL_VALUE_ISINF (f1))
3915 /* Overflow plus exception. */
3916 return 0;
3918 /* Don't constant fold this floating point operation if the
3919 result may depend upon the run-time rounding mode and
3920 flag_rounding_math is set, or if GCC's software emulation
3921 is unable to accurately represent the result. */
3923 if ((flag_rounding_math
3924 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3925 && (inexact || !real_identical (&result, &value)))
3926 return NULL_RTX;
3928 return const_double_from_real_value (result, mode);
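/* For illustration (hypothetical constants): the bitwise branch above works
   on the IEEE bit patterns, so IOR of (const_double 1.0) and
   (const_double -0.0) in DFmode yields (const_double -1.0), since -0.0
   contributes only the sign bit.  The arithmetic branch refuses to fold,
   for example, DBL_MAX + DBL_MAX under flag_trapping_math, because the
   overflow to infinity would otherwise lose a run-time exception.  */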
3932 /* We can fold some multi-word operations. */
3933 if ((GET_MODE_CLASS (mode) == MODE_INT
3934 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3935 && CONST_SCALAR_INT_P (op0)
3936 && CONST_SCALAR_INT_P (op1))
3938 wide_int result;
3939 bool overflow;
3940 rtx_mode_t pop0 = std::make_pair (op0, mode);
3941 rtx_mode_t pop1 = std::make_pair (op1, mode);
3943 #if TARGET_SUPPORTS_WIDE_INT == 0
3944 /* This assert keeps the simplification from producing a result
3945 that cannot be represented in a CONST_DOUBLE but a lot of
3946 upstream callers expect that this function never fails to
3947 simplify something, and so if you added this to the test
3948 above, the code would die later anyway. If this assert
3949 happens, you just need to make the port support wide int. */
3950 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3951 #endif
3952 switch (code)
3954 case MINUS:
3955 result = wi::sub (pop0, pop1);
3956 break;
3958 case PLUS:
3959 result = wi::add (pop0, pop1);
3960 break;
3962 case MULT:
3963 result = wi::mul (pop0, pop1);
3964 break;
3966 case DIV:
3967 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3968 if (overflow)
3969 return NULL_RTX;
3970 break;
3972 case MOD:
3973 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3974 if (overflow)
3975 return NULL_RTX;
3976 break;
3978 case UDIV:
3979 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3980 if (overflow)
3981 return NULL_RTX;
3982 break;
3984 case UMOD:
3985 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3986 if (overflow)
3987 return NULL_RTX;
3988 break;
3990 case AND:
3991 result = wi::bit_and (pop0, pop1);
3992 break;
3994 case IOR:
3995 result = wi::bit_or (pop0, pop1);
3996 break;
3998 case XOR:
3999 result = wi::bit_xor (pop0, pop1);
4000 break;
4002 case SMIN:
4003 result = wi::smin (pop0, pop1);
4004 break;
4006 case SMAX:
4007 result = wi::smax (pop0, pop1);
4008 break;
4010 case UMIN:
4011 result = wi::umin (pop0, pop1);
4012 break;
4014 case UMAX:
4015 result = wi::umax (pop0, pop1);
4016 break;
4018 case LSHIFTRT:
4019 case ASHIFTRT:
4020 case ASHIFT:
4022 wide_int wop1 = pop1;
4023 if (SHIFT_COUNT_TRUNCATED)
4024 wop1 = wi::umod_trunc (wop1, width);
4025 else if (wi::geu_p (wop1, width))
4026 return NULL_RTX;
4028 switch (code)
4030 case LSHIFTRT:
4031 result = wi::lrshift (pop0, wop1);
4032 break;
4034 case ASHIFTRT:
4035 result = wi::arshift (pop0, wop1);
4036 break;
4038 case ASHIFT:
4039 result = wi::lshift (pop0, wop1);
4040 break;
4042 default:
4043 gcc_unreachable ();
4045 break;
4047 case ROTATE:
4048 case ROTATERT:
4050 if (wi::neg_p (pop1))
4051 return NULL_RTX;
4053 switch (code)
4055 case ROTATE:
4056 result = wi::lrotate (pop0, pop1);
4057 break;
4059 case ROTATERT:
4060 result = wi::rrotate (pop0, pop1);
4061 break;
4063 default:
4064 gcc_unreachable ();
4066 break;
4068 default:
4069 return NULL_RTX;
4071 return immed_wide_int_const (result, mode);
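/* For illustration (hypothetical constants): the shift handling above means
   (ashift:SI (const_int 1) (const_int 33)) folds to (const_int 2) on a
   target with SHIFT_COUNT_TRUNCATED (33 mod 32 == 1), but is left alone
   when the target does not truncate shift counts, since the out-of-range
   count makes wi::geu_p return true.  */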
4074 return NULL_RTX;
4079 /* Return a positive integer if X should sort after Y. The value
4080 returned is 1 if and only if X and Y are both regs. */
4082 static int
4083 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4085 int result;
4087 result = (commutative_operand_precedence (y)
4088 - commutative_operand_precedence (x));
4089 if (result)
4090 return result + result;
4092 /* Group together equal REGs to do more simplification. */
4093 if (REG_P (x) && REG_P (y))
4094 return REGNO (x) > REGNO (y);
4096 return 0;
4099 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4100 operands may be another PLUS or MINUS.
4102 Rather than testing for specific cases, we do this by a brute-force method
4103 and do all possible simplifications until no more changes occur. Then
4104 we rebuild the operation.
4106 May return NULL_RTX when no changes were made. */
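/* For illustration (hypothetical operands): given
     simplify_plus_minus (MINUS, SImode,
                          (plus (reg a) (const_int 4)), (const_int 1))
   the expansion loop flattens the operands into { a, +4, -1 }, the two
   constants combine, and the result is rebuilt as
   (plus (reg a) (const_int 3)).  */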
4108 static rtx
4109 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4110 rtx op1)
4112 struct simplify_plus_minus_op_data
4114 rtx op;
4115 short neg;
4116 } ops[16];
4117 rtx result, tem;
4118 int n_ops = 2;
4119 int changed, n_constants, canonicalized = 0;
4120 int i, j;
4122 memset (ops, 0, sizeof ops);
4124 /* Set up the two operands and then expand them until nothing has been
4125 changed. If we run out of room in our array, give up; this should
4126 almost never happen. */
4128 ops[0].op = op0;
4129 ops[0].neg = 0;
4130 ops[1].op = op1;
4131 ops[1].neg = (code == MINUS);
4135 changed = 0;
4136 n_constants = 0;
4138 for (i = 0; i < n_ops; i++)
4140 rtx this_op = ops[i].op;
4141 int this_neg = ops[i].neg;
4142 enum rtx_code this_code = GET_CODE (this_op);
4144 switch (this_code)
4146 case PLUS:
4147 case MINUS:
4148 if (n_ops == ARRAY_SIZE (ops))
4149 return NULL_RTX;
4151 ops[n_ops].op = XEXP (this_op, 1);
4152 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4153 n_ops++;
4155 ops[i].op = XEXP (this_op, 0);
4156 changed = 1;
4157 /* If this operand was negated then we will potentially
4158 canonicalize the expression. Similarly if we don't
4159 place the operands adjacent we're re-ordering the
4160 expression and thus might be performing a
4161 canonicalization. Ignore register re-ordering.
4162 ??? It might be better to shuffle the ops array here,
4163 but then (plus (plus (A, B), plus (C, D))) wouldn't
4164 be seen as non-canonical. */
4165 if (this_neg
4166 || (i != n_ops - 2
4167 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4168 canonicalized = 1;
4169 break;
4171 case NEG:
4172 ops[i].op = XEXP (this_op, 0);
4173 ops[i].neg = ! this_neg;
4174 changed = 1;
4175 canonicalized = 1;
4176 break;
4178 case CONST:
4179 if (n_ops != ARRAY_SIZE (ops)
4180 && GET_CODE (XEXP (this_op, 0)) == PLUS
4181 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4182 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4184 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4185 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4186 ops[n_ops].neg = this_neg;
4187 n_ops++;
4188 changed = 1;
4189 canonicalized = 1;
4191 break;
4193 case NOT:
4194 /* ~a -> (-a - 1) */
4195 if (n_ops != ARRAY_SIZE (ops))
4197 ops[n_ops].op = CONSTM1_RTX (mode);
4198 ops[n_ops++].neg = this_neg;
4199 ops[i].op = XEXP (this_op, 0);
4200 ops[i].neg = !this_neg;
4201 changed = 1;
4202 canonicalized = 1;
4204 break;
4206 case CONST_INT:
4207 n_constants++;
4208 if (this_neg)
4210 ops[i].op = neg_const_int (mode, this_op);
4211 ops[i].neg = 0;
4212 changed = 1;
4213 canonicalized = 1;
4215 break;
4217 default:
4218 break;
4222 while (changed);
4224 if (n_constants > 1)
4225 canonicalized = 1;
4227 gcc_assert (n_ops >= 2);
4229 /* If we only have two operands, we can avoid the loops. */
4230 if (n_ops == 2)
4232 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4233 rtx lhs, rhs;
4235 /* Get the two operands. Be careful with the order, especially for
4236 the cases where code == MINUS. */
4237 if (ops[0].neg && ops[1].neg)
4239 lhs = gen_rtx_NEG (mode, ops[0].op);
4240 rhs = ops[1].op;
4242 else if (ops[0].neg)
4244 lhs = ops[1].op;
4245 rhs = ops[0].op;
4247 else
4249 lhs = ops[0].op;
4250 rhs = ops[1].op;
4253 return simplify_const_binary_operation (code, mode, lhs, rhs);
4256 /* Now simplify each pair of operands until nothing changes. */
4257 while (1)
4259 /* Insertion sort is good enough for a small array. */
4260 for (i = 1; i < n_ops; i++)
4262 struct simplify_plus_minus_op_data save;
4263 int cmp;
4265 j = i - 1;
4266 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4267 if (cmp <= 0)
4268 continue;
4269 /* Just swapping registers doesn't count as canonicalization. */
4270 if (cmp != 1)
4271 canonicalized = 1;
4273 save = ops[i];
4275 ops[j + 1] = ops[j];
4276 while (j--
4277 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4278 ops[j + 1] = save;
4281 changed = 0;
4282 for (i = n_ops - 1; i > 0; i--)
4283 for (j = i - 1; j >= 0; j--)
4285 rtx lhs = ops[j].op, rhs = ops[i].op;
4286 int lneg = ops[j].neg, rneg = ops[i].neg;
4288 if (lhs != 0 && rhs != 0)
4290 enum rtx_code ncode = PLUS;
4292 if (lneg != rneg)
4294 ncode = MINUS;
4295 if (lneg)
4296 std::swap (lhs, rhs);
4298 else if (swap_commutative_operands_p (lhs, rhs))
4299 std::swap (lhs, rhs);
4301 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4302 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4304 rtx tem_lhs, tem_rhs;
4306 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4307 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4308 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4309 tem_rhs);
4311 if (tem && !CONSTANT_P (tem))
4312 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4314 else
4315 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4317 if (tem)
4319 /* Reject "simplifications" that just wrap the two
4320 arguments in a CONST. Failure to do so can result
4321 in infinite recursion with simplify_binary_operation
4322 when it calls us to simplify CONST operations.
4323 Also, if we find such a simplification, don't try
4324 any more combinations with this rhs: We must have
4325 something like symbol+offset, i.e. one of the
4326 trivial CONST expressions we handle later. */
4327 if (GET_CODE (tem) == CONST
4328 && GET_CODE (XEXP (tem, 0)) == ncode
4329 && XEXP (XEXP (tem, 0), 0) == lhs
4330 && XEXP (XEXP (tem, 0), 1) == rhs)
4331 break;
4332 lneg &= rneg;
4333 if (GET_CODE (tem) == NEG)
4334 tem = XEXP (tem, 0), lneg = !lneg;
4335 if (CONST_INT_P (tem) && lneg)
4336 tem = neg_const_int (mode, tem), lneg = 0;
4338 ops[i].op = tem;
4339 ops[i].neg = lneg;
4340 ops[j].op = NULL_RTX;
4341 changed = 1;
4342 canonicalized = 1;
4347 if (!changed)
4348 break;
4350 /* Pack all the operands to the lower-numbered entries. */
4351 for (i = 0, j = 0; j < n_ops; j++)
4352 if (ops[j].op)
4354 ops[i] = ops[j];
4355 i++;
4357 n_ops = i;
4360 /* If nothing changed, fail. */
4361 if (!canonicalized)
4362 return NULL_RTX;
4364 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4365 if (n_ops == 2
4366 && CONST_INT_P (ops[1].op)
4367 && CONSTANT_P (ops[0].op)
4368 && ops[0].neg)
4369 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4371 /* We suppressed creation of trivial CONST expressions in the
4372 combination loop to avoid recursion. Create one manually now.
4373 The combination loop should have ensured that there is exactly
4374 one CONST_INT, and the sort will have ensured that it is last
4375 in the array and that any other constant will be next-to-last. */
4377 if (n_ops > 1
4378 && CONST_INT_P (ops[n_ops - 1].op)
4379 && CONSTANT_P (ops[n_ops - 2].op))
4381 rtx value = ops[n_ops - 1].op;
4382 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4383 value = neg_const_int (mode, value);
4384 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4385 INTVAL (value));
4386 n_ops--;
4389 /* Put a non-negated operand first, if possible. */
4391 for (i = 0; i < n_ops && ops[i].neg; i++)
4392 continue;
4393 if (i == n_ops)
4394 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4395 else if (i != 0)
4397 tem = ops[0].op;
4398 ops[0] = ops[i];
4399 ops[i].op = tem;
4400 ops[i].neg = 1;
4403 /* Now make the result by performing the requested operations. */
4404 result = ops[0].op;
4405 for (i = 1; i < n_ops; i++)
4406 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4407 mode, result, ops[i].op);
4409 return result;
4412 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4413 static bool
4414 plus_minus_operand_p (const_rtx x)
4416 return GET_CODE (x) == PLUS
4417 || GET_CODE (x) == MINUS
4418 || (GET_CODE (x) == CONST
4419 && GET_CODE (XEXP (x, 0)) == PLUS
4420 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4421 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4424 /* Like simplify_binary_operation except used for relational operators.
4425 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4426 not both be VOIDmode.
4428 CMP_MODE specifies the mode in which the comparison is done, so it is
4429 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4430 the operands or, if both are VOIDmode, the operands are compared in
4431 "infinite precision". */
4433 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4434 machine_mode cmp_mode, rtx op0, rtx op1)
4436 rtx tem, trueop0, trueop1;
4438 if (cmp_mode == VOIDmode)
4439 cmp_mode = GET_MODE (op0);
4440 if (cmp_mode == VOIDmode)
4441 cmp_mode = GET_MODE (op1);
4443 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4444 if (tem)
4446 if (SCALAR_FLOAT_MODE_P (mode))
4448 if (tem == const0_rtx)
4449 return CONST0_RTX (mode);
4450 #ifdef FLOAT_STORE_FLAG_VALUE
4452 REAL_VALUE_TYPE val;
4453 val = FLOAT_STORE_FLAG_VALUE (mode);
4454 return const_double_from_real_value (val, mode);
4456 #else
4457 return NULL_RTX;
4458 #endif
4460 if (VECTOR_MODE_P (mode))
4462 if (tem == const0_rtx)
4463 return CONST0_RTX (mode);
4464 #ifdef VECTOR_STORE_FLAG_VALUE
4466 int i, units;
4467 rtvec v;
4469 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4470 if (val == NULL_RTX)
4471 return NULL_RTX;
4472 if (val == const1_rtx)
4473 return CONST1_RTX (mode);
4475 units = GET_MODE_NUNITS (mode);
4476 v = rtvec_alloc (units);
4477 for (i = 0; i < units; i++)
4478 RTVEC_ELT (v, i) = val;
4479 return gen_rtx_raw_CONST_VECTOR (mode, v);
4481 #else
4482 return NULL_RTX;
4483 #endif
4486 return tem;
4489 /* For the following tests, ensure const0_rtx is op1. */
4490 if (swap_commutative_operands_p (op0, op1)
4491 || (op0 == const0_rtx && op1 != const0_rtx))
4492 std::swap (op0, op1), code = swap_condition (code);
4494 /* If op0 is a compare, extract the comparison arguments from it. */
4495 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4496 return simplify_gen_relational (code, mode, VOIDmode,
4497 XEXP (op0, 0), XEXP (op0, 1));
4499 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4500 || CC0_P (op0))
4501 return NULL_RTX;
4503 trueop0 = avoid_constant_pool_reference (op0);
4504 trueop1 = avoid_constant_pool_reference (op1);
4505 return simplify_relational_operation_1 (code, mode, cmp_mode,
4506 trueop0, trueop1);
4509 /* This part of simplify_relational_operation is only used when CMP_MODE
4510 is not in class MODE_CC (i.e. it is a real comparison).
4512 MODE is the mode of the result, while CMP_MODE specifies the mode
4513 in which the comparison is done, so it is the mode of the operands. */
4515 static rtx
4516 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4517 machine_mode cmp_mode, rtx op0, rtx op1)
4519 enum rtx_code op0code = GET_CODE (op0);
4521 if (op1 == const0_rtx && COMPARISON_P (op0))
4523 /* If op0 is a comparison, extract the comparison arguments
4524 from it. */
4525 if (code == NE)
4527 if (GET_MODE (op0) == mode)
4528 return simplify_rtx (op0);
4529 else
4530 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4531 XEXP (op0, 0), XEXP (op0, 1));
4533 else if (code == EQ)
4535 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4536 if (new_code != UNKNOWN)
4537 return simplify_gen_relational (new_code, mode, VOIDmode,
4538 XEXP (op0, 0), XEXP (op0, 1));
4542 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4543 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4544 if ((code == LTU || code == GEU)
4545 && GET_CODE (op0) == PLUS
4546 && CONST_INT_P (XEXP (op0, 1))
4547 && (rtx_equal_p (op1, XEXP (op0, 0))
4548 || rtx_equal_p (op1, XEXP (op0, 1)))
4549 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4550 && XEXP (op0, 1) != const0_rtx)
4552 rtx new_cmp
4553 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4554 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4555 cmp_mode, XEXP (op0, 0), new_cmp);
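/* For illustration (hypothetical operands): the rule above turns
     (ltu:SI (plus:SI (reg x) (const_int 4)) (const_int 4))
   into (geu:SI (reg x) (const_int -4)); the addition wraps exactly when
   x is within 4 of the top of the unsigned range.  */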
4558 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4559 if ((code == LTU || code == GEU)
4560 && GET_CODE (op0) == PLUS
4561 && rtx_equal_p (op1, XEXP (op0, 1))
4562 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4563 && !rtx_equal_p (op1, XEXP (op0, 0)))
4564 return simplify_gen_relational (code, mode, cmp_mode, op0,
4565 copy_rtx (XEXP (op0, 0)));
4567 if (op1 == const0_rtx)
4569 /* Canonicalize (GTU x 0) as (NE x 0). */
4570 if (code == GTU)
4571 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4572 /* Canonicalize (LEU x 0) as (EQ x 0). */
4573 if (code == LEU)
4574 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4576 else if (op1 == const1_rtx)
4578 switch (code)
4580 case GE:
4581 /* Canonicalize (GE x 1) as (GT x 0). */
4582 return simplify_gen_relational (GT, mode, cmp_mode,
4583 op0, const0_rtx);
4584 case GEU:
4585 /* Canonicalize (GEU x 1) as (NE x 0). */
4586 return simplify_gen_relational (NE, mode, cmp_mode,
4587 op0, const0_rtx);
4588 case LT:
4589 /* Canonicalize (LT x 1) as (LE x 0). */
4590 return simplify_gen_relational (LE, mode, cmp_mode,
4591 op0, const0_rtx);
4592 case LTU:
4593 /* Canonicalize (LTU x 1) as (EQ x 0). */
4594 return simplify_gen_relational (EQ, mode, cmp_mode,
4595 op0, const0_rtx);
4596 default:
4597 break;
4600 else if (op1 == constm1_rtx)
4602 /* Canonicalize (LE x -1) as (LT x 0). */
4603 if (code == LE)
4604 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4605 /* Canonicalize (GT x -1) as (GE x 0). */
4606 if (code == GT)
4607 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4610 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4611 if ((code == EQ || code == NE)
4612 && (op0code == PLUS || op0code == MINUS)
4613 && CONSTANT_P (op1)
4614 && CONSTANT_P (XEXP (op0, 1))
4615 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4617 rtx x = XEXP (op0, 0);
4618 rtx c = XEXP (op0, 1);
4619 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4620 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4622 /* Detect an infinite recursive condition, where we oscillate at this
4623 simplification case between:
4624 A + B == C <---> C - B == A,
4625 where A, B, and C are all non-simplifiable constant expressions,
4626 usually SYMBOL_REFs. */
4627 if (GET_CODE (tem) == invcode
4628 && CONSTANT_P (x)
4629 && rtx_equal_p (c, XEXP (tem, 1)))
4630 return NULL_RTX;
4632 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
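/* For illustration (hypothetical operands): the rule above rewrites
     (eq:SI (plus:SI (reg x) (const_int 3)) (const_int 10))
   as (eq:SI (reg x) (const_int 7)), moving the constant across the
   comparison.  */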
4635 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4636 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4637 if (code == NE
4638 && op1 == const0_rtx
4639 && GET_MODE_CLASS (mode) == MODE_INT
4640 && cmp_mode != VOIDmode
4641 /* ??? Work-around BImode bugs in the ia64 backend. */
4642 && mode != BImode
4643 && cmp_mode != BImode
4644 && nonzero_bits (op0, cmp_mode) == 1
4645 && STORE_FLAG_VALUE == 1)
4646 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4647 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4648 : lowpart_subreg (mode, op0, cmp_mode);
4650 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4651 if ((code == EQ || code == NE)
4652 && op1 == const0_rtx
4653 && op0code == XOR)
4654 return simplify_gen_relational (code, mode, cmp_mode,
4655 XEXP (op0, 0), XEXP (op0, 1));
4657 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4658 if ((code == EQ || code == NE)
4659 && op0code == XOR
4660 && rtx_equal_p (XEXP (op0, 0), op1)
4661 && !side_effects_p (XEXP (op0, 0)))
4662 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4663 CONST0_RTX (mode));
4665 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4666 if ((code == EQ || code == NE)
4667 && op0code == XOR
4668 && rtx_equal_p (XEXP (op0, 1), op1)
4669 && !side_effects_p (XEXP (op0, 1)))
4670 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4671 CONST0_RTX (mode));
4673 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4674 if ((code == EQ || code == NE)
4675 && op0code == XOR
4676 && CONST_SCALAR_INT_P (op1)
4677 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4678 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4679 simplify_gen_binary (XOR, cmp_mode,
4680 XEXP (op0, 1), op1));
4682 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4683 can be implemented with a BICS instruction on some targets, or
4684 constant-folded if y is a constant. */
4685 if ((code == EQ || code == NE)
4686 && op0code == AND
4687 && rtx_equal_p (XEXP (op0, 0), op1)
4688 && !side_effects_p (op1)
4689 && op1 != CONST0_RTX (cmp_mode))
4691 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4692 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4694 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4695 CONST0_RTX (cmp_mode));
4698 /* Likewise for (eq/ne (and x y) y). */
4699 if ((code == EQ || code == NE)
4700 && op0code == AND
4701 && rtx_equal_p (XEXP (op0, 1), op1)
4702 && !side_effects_p (op1)
4703 && op1 != CONST0_RTX (cmp_mode))
4705 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4706 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4708 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4709 CONST0_RTX (cmp_mode));
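/* For illustration (hypothetical operands): with a constant mask the pair
   of rules above folds
     (eq (and (reg x) (const_int 0xff)) (reg x))
   into (eq (and (reg x) (const_int -256)) (const_int 0)); that is,
   "x fits in the low byte" becomes "the high bits of x are zero".  */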
4712 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4713 if ((code == EQ || code == NE)
4714 && GET_CODE (op0) == BSWAP
4715 && CONST_SCALAR_INT_P (op1))
4716 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4717 simplify_gen_unary (BSWAP, cmp_mode,
4718 op1, cmp_mode));
4720 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4721 if ((code == EQ || code == NE)
4722 && GET_CODE (op0) == BSWAP
4723 && GET_CODE (op1) == BSWAP)
4724 return simplify_gen_relational (code, mode, cmp_mode,
4725 XEXP (op0, 0), XEXP (op1, 0));
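/* For illustration (hypothetical operands): byte-swapping both sides, the
   rules above turn
     (eq:SI (bswap:SI (reg x)) (const_int 0x12345678))
   into (eq:SI (reg x) (const_int 0x78563412)).  */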
4727 if (op0code == POPCOUNT && op1 == const0_rtx)
4728 switch (code)
4730 case EQ:
4731 case LE:
4732 case LEU:
4733 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4734 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4735 XEXP (op0, 0), const0_rtx);
4737 case NE:
4738 case GT:
4739 case GTU:
4740 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4741 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4742 XEXP (op0, 0), const0_rtx);
4744 default:
4745 break;
4748 return NULL_RTX;
4751 enum
4753 CMP_EQ = 1,
4754 CMP_LT = 2,
4755 CMP_GT = 4,
4756 CMP_LTU = 8,
4757 CMP_GTU = 16
4761 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4762 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4763 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4764 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4765 For floating-point comparisons, assume that the operands were ordered. */
4767 static rtx
4768 comparison_result (enum rtx_code code, int known_results)
4770 switch (code)
4772 case EQ:
4773 case UNEQ:
4774 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4775 case NE:
4776 case LTGT:
4777 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4779 case LT:
4780 case UNLT:
4781 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4782 case GE:
4783 case UNGE:
4784 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4786 case GT:
4787 case UNGT:
4788 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4789 case LE:
4790 case UNLE:
4791 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4793 case LTU:
4794 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4795 case GEU:
4796 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4798 case GTU:
4799 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4800 case LEU:
4801 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4803 case ORDERED:
4804 return const_true_rtx;
4805 case UNORDERED:
4806 return const0_rtx;
4807 default:
4808 gcc_unreachable ();
4812 /* Check if the given comparison (done in the given MODE) is actually
4813 a tautology or a contradiction. If the mode is VOIDmode, the
4814 comparison is done in "infinite precision". If no simplification
4815 is possible, this function returns zero. Otherwise, it returns
4816 either const_true_rtx or const0_rtx. */
4819 simplify_const_relational_operation (enum rtx_code code,
4820 machine_mode mode,
4821 rtx op0, rtx op1)
4823 rtx tem;
4824 rtx trueop0;
4825 rtx trueop1;
4827 gcc_assert (mode != VOIDmode
4828 || (GET_MODE (op0) == VOIDmode
4829 && GET_MODE (op1) == VOIDmode));
4831 /* If op0 is a compare, extract the comparison arguments from it. */
4832 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4834 op1 = XEXP (op0, 1);
4835 op0 = XEXP (op0, 0);
4837 if (GET_MODE (op0) != VOIDmode)
4838 mode = GET_MODE (op0);
4839 else if (GET_MODE (op1) != VOIDmode)
4840 mode = GET_MODE (op1);
4841 else
4842 return 0;
4845 /* We can't simplify MODE_CC values since we don't know what the
4846 actual comparison is. */
4847 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4848 return 0;
4850 /* Make sure the constant is second. */
4851 if (swap_commutative_operands_p (op0, op1))
4853 std::swap (op0, op1);
4854 code = swap_condition (code);
4857 trueop0 = avoid_constant_pool_reference (op0);
4858 trueop1 = avoid_constant_pool_reference (op1);
4860 /* For integer comparisons of A and B maybe we can simplify A - B and can
4861 then simplify a comparison of that with zero. If A and B are both either
4862 a register or a CONST_INT, this can't help; testing for these cases will
4863 prevent infinite recursion here and speed things up.
4865 We can only do this for EQ and NE comparisons as otherwise we may
4866 lose or introduce overflow which we cannot disregard as undefined as
4867 we do not know the signedness of the operation on either the left or
4868 the right hand side of the comparison. */
4870 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4871 && (code == EQ || code == NE)
4872 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4873 && (REG_P (op1) || CONST_INT_P (trueop1)))
4874 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4875 /* We cannot do this if tem is a nonzero address. */
4876 && ! nonzero_address_p (tem))
4877 return simplify_const_relational_operation (signed_condition (code),
4878 mode, tem, const0_rtx);
4880 if (! HONOR_NANS (mode) && code == ORDERED)
4881 return const_true_rtx;
4883 if (! HONOR_NANS (mode) && code == UNORDERED)
4884 return const0_rtx;
4886 /* For modes without NaNs, if the two operands are equal, we know the
4887 result except if they have side-effects. Even with NaNs we know
4888 the result of unordered comparisons and, if signaling NaNs are
4889 irrelevant, also the result of LT/GT/LTGT. */
4890 if ((! HONOR_NANS (trueop0)
4891 || code == UNEQ || code == UNLE || code == UNGE
4892 || ((code == LT || code == GT || code == LTGT)
4893 && ! HONOR_SNANS (trueop0)))
4894 && rtx_equal_p (trueop0, trueop1)
4895 && ! side_effects_p (trueop0))
4896 return comparison_result (code, CMP_EQ);
4898 /* If the operands are floating-point constants, see if we can fold
4899 the result. */
4900 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4901 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4902 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4904 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4905 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4907 /* Comparisons are unordered iff at least one of the values is NaN. */
4908 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4909 switch (code)
4911 case UNEQ:
4912 case UNLT:
4913 case UNGT:
4914 case UNLE:
4915 case UNGE:
4916 case NE:
4917 case UNORDERED:
4918 return const_true_rtx;
4919 case EQ:
4920 case LT:
4921 case GT:
4922 case LE:
4923 case GE:
4924 case LTGT:
4925 case ORDERED:
4926 return const0_rtx;
4927 default:
4928 return 0;
4931 return comparison_result (code,
4932 (real_equal (d0, d1) ? CMP_EQ :
4933 real_less (d0, d1) ? CMP_LT : CMP_GT));
4936 /* Otherwise, see if the operands are both integers. */
4937 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4938 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4940 /* It would be nice if we really had a mode here. However, the
4941 largest int representable on the target is as good as
4942 infinite. */
4943 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4944 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4945 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4947 if (wi::eq_p (ptrueop0, ptrueop1))
4948 return comparison_result (code, CMP_EQ);
4949 else
4951 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4952 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4953 return comparison_result (code, cr);
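/* For illustration (hypothetical constants): comparing (const_int -1) with
   (const_int 1) yields CMP_LT | CMP_GTU, so (lt -1 1) folds to
   const_true_rtx while (ltu -1 1) folds to const0_rtx, because the signed
   and unsigned orderings disagree.  */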
4957 /* Optimize comparisons with upper and lower bounds. */
4958 if (HWI_COMPUTABLE_MODE_P (mode)
4959 && CONST_INT_P (trueop1)
4960 && !side_effects_p (trueop0))
4962 int sign;
4963 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4964 HOST_WIDE_INT val = INTVAL (trueop1);
4965 HOST_WIDE_INT mmin, mmax;
4967 if (code == GEU
4968 || code == LEU
4969 || code == GTU
4970 || code == LTU)
4971 sign = 0;
4972 else
4973 sign = 1;
4975 /* Get a reduced range if the sign bit is zero. */
4976 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4978 mmin = 0;
4979 mmax = nonzero;
4981 else
4983 rtx mmin_rtx, mmax_rtx;
4984 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4986 mmin = INTVAL (mmin_rtx);
4987 mmax = INTVAL (mmax_rtx);
4988 if (sign)
4990 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4992 mmin >>= (sign_copies - 1);
4993 mmax >>= (sign_copies - 1);
4997 switch (code)
4999 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5000 case GEU:
5001 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5002 return const_true_rtx;
5003 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5004 return const0_rtx;
5005 break;
5006 case GE:
5007 if (val <= mmin)
5008 return const_true_rtx;
5009 if (val > mmax)
5010 return const0_rtx;
5011 break;
5013 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5014 case LEU:
5015 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5016 return const_true_rtx;
5017 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5018 return const0_rtx;
5019 break;
5020 case LE:
5021 if (val >= mmax)
5022 return const_true_rtx;
5023 if (val < mmin)
5024 return const0_rtx;
5025 break;
5027 case EQ:
5028 /* x == y is always false for y out of range. */
5029 if (val < mmin || val > mmax)
5030 return const0_rtx;
5031 break;
5033 /* x > y is always false for y >= mmax, always true for y < mmin. */
5034 case GTU:
5035 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5036 return const0_rtx;
5037 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5038 return const_true_rtx;
5039 break;
5040 case GT:
5041 if (val >= mmax)
5042 return const0_rtx;
5043 if (val < mmin)
5044 return const_true_rtx;
5045 break;
5047 /* x < y is always false for y <= mmin, always true for y > mmax. */
5048 case LTU:
5049 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5050 return const0_rtx;
5051 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5052 return const_true_rtx;
5053 break;
5054 case LT:
5055 if (val <= mmin)
5056 return const0_rtx;
5057 if (val > mmax)
5058 return const_true_rtx;
5059 break;
5061 case NE:
5062 /* x != y is always true for y out of range. */
5063 if (val < mmin || val > mmax)
5064 return const_true_rtx;
5065 break;
5067 default:
5068 break;
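/* For illustration (hypothetical operands): if nonzero_bits shows that
   trueop0 fits in eight bits, mmin/mmax become 0/255, so
   (gtu trueop0 (const_int 255)) folds to const0_rtx and
   (leu trueop0 (const_int 255)) folds to const_true_rtx.  */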
5072 /* Optimize integer comparisons with zero. */
5073 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5075 /* Some addresses are known to be nonzero. We don't know
5076 their sign, but equality comparisons are known. */
5077 if (nonzero_address_p (trueop0))
5079 if (code == EQ || code == LEU)
5080 return const0_rtx;
5081 if (code == NE || code == GTU)
5082 return const_true_rtx;
5085 /* See if the first operand is an IOR with a constant. If so, we
5086 may be able to determine the result of this comparison. */
5087 if (GET_CODE (op0) == IOR)
5089 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5090 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5092 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5093 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5094 && (UINTVAL (inner_const)
5095 & ((unsigned HOST_WIDE_INT) 1
5096 << sign_bitnum)));
5098 switch (code)
5100 case EQ:
5101 case LEU:
5102 return const0_rtx;
5103 case NE:
5104 case GTU:
5105 return const_true_rtx;
5106 case LT:
5107 case LE:
5108 if (has_sign)
5109 return const_true_rtx;
5110 break;
5111 case GT:
5112 case GE:
5113 if (has_sign)
5114 return const0_rtx;
5115 break;
5116 default:
5117 break;
5123 /* Optimize comparison of ABS with zero. */
5124 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5125 && (GET_CODE (trueop0) == ABS
5126 || (GET_CODE (trueop0) == FLOAT_EXTEND
5127 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5129 switch (code)
5131 case LT:
5132 /* Optimize abs(x) < 0.0. */
5133 if (!HONOR_SNANS (mode)
5134 && (!INTEGRAL_MODE_P (mode)
5135 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5137 if (INTEGRAL_MODE_P (mode)
5138 && (issue_strict_overflow_warning
5139 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5140 warning (OPT_Wstrict_overflow,
5141 ("assuming signed overflow does not occur when "
5142 "assuming abs (x) < 0 is false"));
5143 return const0_rtx;
5145 break;
5147 case GE:
5148 /* Optimize abs(x) >= 0.0. */
5149 if (!HONOR_NANS (mode)
5150 && (!INTEGRAL_MODE_P (mode)
5151 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5153 if (INTEGRAL_MODE_P (mode)
5154 && (issue_strict_overflow_warning
5155 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5156 warning (OPT_Wstrict_overflow,
5157 ("assuming signed overflow does not occur when "
5158 "assuming abs (x) >= 0 is true"));
5159 return const_true_rtx;
5161 break;
5163 case UNGE:
5164 /* Optimize ! (abs(x) < 0.0). */
5165 return const_true_rtx;
5167 default:
5168 break;
5172 return 0;
5175 /* Simplify CODE, an operation with result mode MODE and three operands,
5176 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5177 a constant. Return 0 if no simplification is possible. */
5180 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5181 machine_mode op0_mode, rtx op0, rtx op1,
5182 rtx op2)
5184 unsigned int width = GET_MODE_PRECISION (mode);
5185 bool any_change = false;
5186 rtx tem, trueop2;
5188 /* VOIDmode means "infinite" precision. */
5189 if (width == 0)
5190 width = HOST_BITS_PER_WIDE_INT;
5192 switch (code)
5194 case FMA:
5195 /* Simplify negations around the multiplication. */
5196 /* -a * -b + c => a * b + c. */
5197 if (GET_CODE (op0) == NEG)
5199 tem = simplify_unary_operation (NEG, mode, op1, mode);
5200 if (tem)
5201 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5203 else if (GET_CODE (op1) == NEG)
5205 tem = simplify_unary_operation (NEG, mode, op0, mode);
5206 if (tem)
5207 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5210 /* Canonicalize the two multiplication operands. */
5211 /* a * -b + c => -b * a + c. */
5212 if (swap_commutative_operands_p (op0, op1))
5213 std::swap (op0, op1), any_change = true;
5215 if (any_change)
5216 return gen_rtx_FMA (mode, op0, op1, op2);
5217 return NULL_RTX;
5219 case SIGN_EXTRACT:
5220 case ZERO_EXTRACT:
5221 if (CONST_INT_P (op0)
5222 && CONST_INT_P (op1)
5223 && CONST_INT_P (op2)
5224 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5225 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5227 /* Extracting a bit-field from a constant */
5228 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5229 HOST_WIDE_INT op1val = INTVAL (op1);
5230 HOST_WIDE_INT op2val = INTVAL (op2);
5231 if (BITS_BIG_ENDIAN)
5232 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5233 else
5234 val >>= op2val;
5236 if (HOST_BITS_PER_WIDE_INT != op1val)
5238 /* First zero-extend. */
5239 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5240 /* If desired, propagate sign bit. */
5241 if (code == SIGN_EXTRACT
5242 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5243 != 0)
5244 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5247 return gen_int_mode (val, mode);
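/* For illustration (hypothetical constants): with !BITS_BIG_ENDIAN,
   extracting the high nibble of (const_int 0xab) as a 4-bit field at
   position 4 gives val = 0xa, so the ZERO_EXTRACT folds to (const_int 10)
   while the SIGN_EXTRACT folds to (const_int -6), the field's sign bit
   being set.  */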
5249 break;
5251 case IF_THEN_ELSE:
5252 if (CONST_INT_P (op0))
5253 return op0 != const0_rtx ? op1 : op2;
5255 /* Convert c ? a : a into "a". */
5256 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5257 return op1;
5259 /* Convert a != b ? a : b into "a". */
5260 if (GET_CODE (op0) == NE
5261 && ! side_effects_p (op0)
5262 && ! HONOR_NANS (mode)
5263 && ! HONOR_SIGNED_ZEROS (mode)
5264 && ((rtx_equal_p (XEXP (op0, 0), op1)
5265 && rtx_equal_p (XEXP (op0, 1), op2))
5266 || (rtx_equal_p (XEXP (op0, 0), op2)
5267 && rtx_equal_p (XEXP (op0, 1), op1))))
5268 return op1;
5270 /* Convert a == b ? a : b into "b". */
5271 if (GET_CODE (op0) == EQ
5272 && ! side_effects_p (op0)
5273 && ! HONOR_NANS (mode)
5274 && ! HONOR_SIGNED_ZEROS (mode)
5275 && ((rtx_equal_p (XEXP (op0, 0), op1)
5276 && rtx_equal_p (XEXP (op0, 1), op2))
5277 || (rtx_equal_p (XEXP (op0, 0), op2)
5278 && rtx_equal_p (XEXP (op0, 1), op1))))
5279 return op2;
5281 /* Convert (!c) != {0,...,0} ? a : b into
5282 c != {0,...,0} ? b : a for vector modes. */
5283 if (VECTOR_MODE_P (GET_MODE (op1))
5284 && GET_CODE (op0) == NE
5285 && GET_CODE (XEXP (op0, 0)) == NOT
5286 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5288 rtx cv = XEXP (op0, 1);
5289 int nunits = CONST_VECTOR_NUNITS (cv);
5290 bool ok = true;
5291 for (int i = 0; i < nunits; ++i)
5292 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5294 ok = false;
5295 break;
5297 if (ok)
5299 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5300 XEXP (XEXP (op0, 0), 0),
5301 XEXP (op0, 1));
5302 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5303 return retval;
5307 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5309 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5310 ? GET_MODE (XEXP (op0, 1))
5311 : GET_MODE (XEXP (op0, 0)));
5312 rtx temp;
5314 /* Look for happy constants in op1 and op2. */
5315 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5317 HOST_WIDE_INT t = INTVAL (op1);
5318 HOST_WIDE_INT f = INTVAL (op2);
5320 if (t == STORE_FLAG_VALUE && f == 0)
5321 code = GET_CODE (op0);
5322 else if (t == 0 && f == STORE_FLAG_VALUE)
5324 enum rtx_code tmp;
5325 tmp = reversed_comparison_code (op0, NULL_RTX);
5326 if (tmp == UNKNOWN)
5327 break;
5328 code = tmp;
5330 else
5331 break;
5333 return simplify_gen_relational (code, mode, cmp_mode,
5334 XEXP (op0, 0), XEXP (op0, 1));
5337 if (cmp_mode == VOIDmode)
5338 cmp_mode = op0_mode;
5339 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5340 cmp_mode, XEXP (op0, 0),
5341 XEXP (op0, 1));
5343 /* See if any simplifications were possible. */
5344 if (temp)
5346 if (CONST_INT_P (temp))
5347 return temp == const0_rtx ? op2 : op1;
5348 else if (temp)
5349 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5352 break;
5354 case VEC_MERGE:
5355 gcc_assert (GET_MODE (op0) == mode);
5356 gcc_assert (GET_MODE (op1) == mode);
5357 gcc_assert (VECTOR_MODE_P (mode));
5358 trueop2 = avoid_constant_pool_reference (op2);
5359 if (CONST_INT_P (trueop2))
5361 int elt_size = GET_MODE_UNIT_SIZE (mode);
5362 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5363 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5364 unsigned HOST_WIDE_INT mask;
5365 if (n_elts == HOST_BITS_PER_WIDE_INT)
5366 mask = -1;
5367 else
5368 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5370 if (!(sel & mask) && !side_effects_p (op0))
5371 return op1;
5372 if ((sel & mask) == mask && !side_effects_p (op1))
5373 return op0;
5375 rtx trueop0 = avoid_constant_pool_reference (op0);
5376 rtx trueop1 = avoid_constant_pool_reference (op1);
5377 if (GET_CODE (trueop0) == CONST_VECTOR
5378 && GET_CODE (trueop1) == CONST_VECTOR)
5380 rtvec v = rtvec_alloc (n_elts);
5381 unsigned int i;
5383 for (i = 0; i < n_elts; i++)
5384 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5385 ? CONST_VECTOR_ELT (trueop0, i)
5386 : CONST_VECTOR_ELT (trueop1, i));
5387 return gen_rtx_CONST_VECTOR (mode, v);
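/* For illustration (hypothetical constants): with selector 5 (binary 0101),
   elements 0 and 2 come from the first vector, so
     (vec_merge:V4SI (const_vector [1 2 3 4]) (const_vector [5 6 7 8])
                     (const_int 5))
   folds to (const_vector:V4SI [1 6 3 8]).  */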
5390 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5391 if no element from a appears in the result. */
5392 if (GET_CODE (op0) == VEC_MERGE)
5394 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5395 if (CONST_INT_P (tem))
5397 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5398 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5399 return simplify_gen_ternary (code, mode, mode,
5400 XEXP (op0, 1), op1, op2);
5401 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5402 return simplify_gen_ternary (code, mode, mode,
5403 XEXP (op0, 0), op1, op2);
5406 if (GET_CODE (op1) == VEC_MERGE)
5408 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5409 if (CONST_INT_P (tem))
5411 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5412 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5413 return simplify_gen_ternary (code, mode, mode,
5414 op0, XEXP (op1, 1), op2);
5415 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5416 return simplify_gen_ternary (code, mode, mode,
5417 op0, XEXP (op1, 0), op2);
5421 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5422 with a. */
5423 if (GET_CODE (op0) == VEC_DUPLICATE
5424 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5425 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5426 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5428 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5429 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5431 if (XEXP (XEXP (op0, 0), 0) == op1
5432 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5433 return op1;
5438 if (rtx_equal_p (op0, op1)
5439 && !side_effects_p (op2) && !side_effects_p (op1))
5440 return op0;
5442 break;
5444 default:
5445 gcc_unreachable ();
5448 return 0;
5451 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5452 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5453 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5455 Works by unpacking OP into a collection of 8-bit values
5456 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5457 and then repacking them again for OUTERMODE. */
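/* For illustration (hypothetical operands): on a little-endian target,
   simplify_immed_subreg (HImode, (const_int 0x12345678), SImode, 0)
   unpacks the SImode value into bytes 78 56 34 12, selects the two bytes
   at offset 0, and repacks them as (const_int 0x5678); a big-endian
   target instead yields (const_int 0x1234) for byte offset 0.  */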
5459 static rtx
5460 simplify_immed_subreg (machine_mode outermode, rtx op,
5461 machine_mode innermode, unsigned int byte)
5463 enum {
5464 value_bit = 8,
5465 value_mask = (1 << value_bit) - 1
5467 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5468 int value_start;
5469 int i;
5470 int elem;
5472 int num_elem;
5473 rtx * elems;
5474 int elem_bitsize;
5475 rtx result_s;
5476 rtvec result_v = NULL;
5477 enum mode_class outer_class;
5478 machine_mode outer_submode;
5479 int max_bitsize;
5481 /* Some ports misuse CCmode. */
5482 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5483 return op;
5485 /* We have no way to represent a complex constant at the rtl level. */
5486 if (COMPLEX_MODE_P (outermode))
5487 return NULL_RTX;
5489 /* We support any size mode. */
5490 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5491 GET_MODE_BITSIZE (innermode));
5493 /* Unpack the value. */
5495 if (GET_CODE (op) == CONST_VECTOR)
5497 num_elem = CONST_VECTOR_NUNITS (op);
5498 elems = &CONST_VECTOR_ELT (op, 0);
5499 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5501 else
5503 num_elem = 1;
5504 elems = &op;
5505 elem_bitsize = max_bitsize;
5507 /* If this asserts, it is too complicated; reducing value_bit may help. */
5508 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5509 /* I don't know how to handle endianness of sub-units. */
5510 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5512 for (elem = 0; elem < num_elem; elem++)
5514 unsigned char * vp;
5515 rtx el = elems[elem];
5517 /* Vectors are kept in target memory order. (This is probably
5518 a mistake.) */
5520 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5521 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5522 / BITS_PER_UNIT);
5523 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5524 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5525 unsigned bytele = (subword_byte % UNITS_PER_WORD
5526 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5527 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5530 switch (GET_CODE (el))
5532 case CONST_INT:
5533 for (i = 0;
5534 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5535 i += value_bit)
5536 *vp++ = INTVAL (el) >> i;
5537 /* CONST_INTs are always logically sign-extended. */
5538 for (; i < elem_bitsize; i += value_bit)
5539 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5540 break;
5542 case CONST_WIDE_INT:
5544 rtx_mode_t val = std::make_pair (el, innermode);
5545 unsigned char extend = wi::sign_mask (val);
5547 for (i = 0; i < elem_bitsize; i += value_bit)
5548 *vp++ = wi::extract_uhwi (val, i, value_bit);
5549 for (; i < elem_bitsize; i += value_bit)
5550 *vp++ = extend;
5552 break;
5554 case CONST_DOUBLE:
5555 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5557 unsigned char extend = 0;
5558 /* If this triggers, someone should have generated a
5559 CONST_INT instead. */
5560 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5562 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5563 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5564 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5566 *vp++
5567 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5568 i += value_bit;
5571 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5572 extend = -1;
5573 for (; i < elem_bitsize; i += value_bit)
5574 *vp++ = extend;
5576 else
5578 /* This is big enough for anything on the platform. */
5579 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5580 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5582 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5583 gcc_assert (bitsize <= elem_bitsize);
5584 gcc_assert (bitsize % value_bit == 0);
5586 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5587 GET_MODE (el));
5589 /* real_to_target produces its result in words affected by
5590 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5591 and use WORDS_BIG_ENDIAN instead; see the documentation
5592 of SUBREG in rtl.texi. */
5593 for (i = 0; i < bitsize; i += value_bit)
5595 int ibase;
5596 if (WORDS_BIG_ENDIAN)
5597 ibase = bitsize - 1 - i;
5598 else
5599 ibase = i;
5600 *vp++ = tmp[ibase / 32] >> i % 32;
5603 /* It shouldn't matter what's done here, so fill it with
5604 zero. */
5605 for (; i < elem_bitsize; i += value_bit)
5606 *vp++ = 0;
5608 break;
5610 case CONST_FIXED:
5611 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5613 for (i = 0; i < elem_bitsize; i += value_bit)
5614 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5616 else
5618 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5619 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5620 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5621 i += value_bit)
5622 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5623 >> (i - HOST_BITS_PER_WIDE_INT);
5624 for (; i < elem_bitsize; i += value_bit)
5625 *vp++ = 0;
5627 break;
5629 default:
5630 gcc_unreachable ();
5634 /* Now, pick the right byte to start with. */
5635 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5636 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5637 will already have offset 0. */
5638 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5640 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5641 - byte);
5642 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5643 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5644 byte = (subword_byte % UNITS_PER_WORD
5645 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5648 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5649 so if it's become negative it will instead be very large.) */
5650 gcc_assert (byte < GET_MODE_SIZE (innermode));
5652 /* Convert from bytes to chunks of size value_bit. */
5653 value_start = byte * (BITS_PER_UNIT / value_bit);
5655 /* Re-pack the value. */
5656 num_elem = GET_MODE_NUNITS (outermode);
5658 if (VECTOR_MODE_P (outermode))
5660 result_v = rtvec_alloc (num_elem);
5661 elems = &RTVEC_ELT (result_v, 0);
5663 else
5664 elems = &result_s;
5666 outer_submode = GET_MODE_INNER (outermode);
5667 outer_class = GET_MODE_CLASS (outer_submode);
5668 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5670 gcc_assert (elem_bitsize % value_bit == 0);
5671 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5673 for (elem = 0; elem < num_elem; elem++)
5675 unsigned char *vp;
5677 /* Vectors are stored in target memory order. (This is probably
5678 a mistake.) */
5680 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5681 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5682 / BITS_PER_UNIT);
5683 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5684 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5685 unsigned bytele = (subword_byte % UNITS_PER_WORD
5686 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5687 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5690 switch (outer_class)
5692 case MODE_INT:
5693 case MODE_PARTIAL_INT:
5695 int u;
5696 int base = 0;
5697 int units
5698 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5699 / HOST_BITS_PER_WIDE_INT;
5700 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5701 wide_int r;
5703 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5704 return NULL_RTX;
5705 for (u = 0; u < units; u++)
5707 unsigned HOST_WIDE_INT buf = 0;
5708 for (i = 0;
5709 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5710 i += value_bit)
5711 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5713 tmp[u] = buf;
5714 base += HOST_BITS_PER_WIDE_INT;
5716 r = wide_int::from_array (tmp, units,
5717 GET_MODE_PRECISION (outer_submode));
5718 #if TARGET_SUPPORTS_WIDE_INT == 0
5719 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5720 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5721 return NULL_RTX;
5722 #endif
5723 elems[elem] = immed_wide_int_const (r, outer_submode);
5725 break;
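/* The MODE_INT case above is the inverse of the earlier decomposition:
   it gathers value_bit-sized chunks (low chunk first) back into
   HOST_WIDE_INT-sized words and hands the array to wide_int::from_array.
   A standalone sketch of that repacking, assuming 8-bit chunks and
   64-bit words purely for illustration (hypothetical helper, not a GCC
   API): */
#if 0
/* Pack NBITS worth of 8-bit chunks from VP into 64-bit words.  WORDS
   must be zero-initialized by the caller and large enough to hold
   NBITS bits; NBITS must be a multiple of 8.  */
static void
pack_chunks (const unsigned char *vp, unsigned int nbits,
             unsigned long long *words)
{
  unsigned int i;
  for (i = 0; i < nbits; i += 8)
    words[i / 64] |= (unsigned long long) vp[i / 8] << (i % 64);
}
#endif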
5727 case MODE_FLOAT:
5728 case MODE_DECIMAL_FLOAT:
5730 REAL_VALUE_TYPE r;
5731 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5733 /* real_from_target wants its input in words affected by
5734 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5735 and use WORDS_BIG_ENDIAN instead; see the documentation
5736 of SUBREG in rtl.texi. */
5737 for (i = 0; i < max_bitsize / 32; i++)
5738 tmp[i] = 0;
5739 for (i = 0; i < elem_bitsize; i += value_bit)
5741 int ibase;
5742 if (WORDS_BIG_ENDIAN)
5743 ibase = elem_bitsize - 1 - i;
5744 else
5745 ibase = i;
5746 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5749 real_from_target (&r, tmp, outer_submode);
5750 elems[elem] = const_double_from_real_value (r, outer_submode);
5752 break;
5754 case MODE_FRACT:
5755 case MODE_UFRACT:
5756 case MODE_ACCUM:
5757 case MODE_UACCUM:
5759 FIXED_VALUE_TYPE f;
5760 f.data.low = 0;
5761 f.data.high = 0;
5762 f.mode = outer_submode;
5764 for (i = 0;
5765 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5766 i += value_bit)
5767 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5768 for (; i < elem_bitsize; i += value_bit)
5769 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5770 << (i - HOST_BITS_PER_WIDE_INT));
5772 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5774 break;
5776 default:
5777 gcc_unreachable ();
5780 if (VECTOR_MODE_P (outermode))
5781 return gen_rtx_CONST_VECTOR (outermode, result_v);
5782 else
5783 return result_s;
5786 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5787 Return 0 if no simplifications are possible. */
5789 simplify_subreg (machine_mode outermode, rtx op,
5790 machine_mode innermode, unsigned int byte)
5792 /* Little bit of sanity checking. */
5793 gcc_assert (innermode != VOIDmode);
5794 gcc_assert (outermode != VOIDmode);
5795 gcc_assert (innermode != BLKmode);
5796 gcc_assert (outermode != BLKmode);
5798 gcc_assert (GET_MODE (op) == innermode
5799 || GET_MODE (op) == VOIDmode);
5801 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5802 return NULL_RTX;
5804 if (byte >= GET_MODE_SIZE (innermode))
5805 return NULL_RTX;
5807 if (outermode == innermode && !byte)
5808 return op;
5810 if (CONST_SCALAR_INT_P (op)
5811 || CONST_DOUBLE_AS_FLOAT_P (op)
5812 || GET_CODE (op) == CONST_FIXED
5813 || GET_CODE (op) == CONST_VECTOR)
5814 return simplify_immed_subreg (outermode, op, innermode, byte);
5816 /* Changing mode twice with SUBREG => just change it once,
5817 or not at all if changing back to OP's starting mode. */
5818 if (GET_CODE (op) == SUBREG)
5820 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5821 int final_offset = byte + SUBREG_BYTE (op);
5822 rtx newx;
5824 if (outermode == innermostmode
5825 && byte == 0 && SUBREG_BYTE (op) == 0)
5826 return SUBREG_REG (op);
5828 /* The SUBREG_BYTE represents the offset, as if the value were stored
5829 in memory. An irritating exception is the paradoxical subreg, where
5830 we define SUBREG_BYTE to be 0; on big-endian machines the true
5831 value would be negative. For a moment, undo this exception. */
5832 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5834 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5835 if (WORDS_BIG_ENDIAN)
5836 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5837 if (BYTES_BIG_ENDIAN)
5838 final_offset += difference % UNITS_PER_WORD;
5840 if (SUBREG_BYTE (op) == 0
5841 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5843 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5844 if (WORDS_BIG_ENDIAN)
5845 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5846 if (BYTES_BIG_ENDIAN)
5847 final_offset += difference % UNITS_PER_WORD;
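/* As a concrete illustration of the adjustment above: for a paradoxical
   subreg the stored SUBREG_BYTE is 0, but the true memory-order offset
   on a big-endian target would be negative.  A standalone sketch of the
   value being added back, with a hard-coded 4-byte word purely for
   illustration (hypothetical helper, not a GCC API): */
#if 0
/* DIFFERENCE is inner_size - outer_size and is negative for a
   paradoxical subreg; C division and modulo truncate toward zero, so
   the result is <= 0.  */
static int
paradoxical_offset_adjustment (int difference, int words_big_endian,
                               int bytes_big_endian)
{
  const int units_per_word = 4;   /* UNITS_PER_WORD stand-in.  */
  int adjust = 0;
  if (words_big_endian)
    adjust += (difference / units_per_word) * units_per_word;
  if (bytes_big_endian)
    adjust += difference % units_per_word;
  return adjust;
}

/* E.g. (subreg:SI (reg:QI)) on a fully big-endian target:
   paradoxical_offset_adjustment (1 - 4, 1, 1) == -3.  */
#endif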
5850 /* See whether the resulting subreg will be paradoxical. */
5851 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5853 /* In nonparadoxical subregs we can't handle negative offsets. */
5854 if (final_offset < 0)
5855 return NULL_RTX;
5856 /* Bail out in case the resulting subreg would be incorrect. */
5857 if (final_offset % GET_MODE_SIZE (outermode)
5858 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5859 return NULL_RTX;
5861 else
5863 int offset = 0;
5864 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5866 /* In a paradoxical subreg, see if we are still looking at the lower
5867 part. If so, our SUBREG_BYTE will be 0. */
5868 if (WORDS_BIG_ENDIAN)
5869 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5870 if (BYTES_BIG_ENDIAN)
5871 offset += difference % UNITS_PER_WORD;
5872 if (offset == final_offset)
5873 final_offset = 0;
5874 else
5875 return NULL_RTX;
5878 /* Recurse for further possible simplifications. */
5879 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5880 final_offset);
5881 if (newx)
5882 return newx;
5883 if (validate_subreg (outermode, innermostmode,
5884 SUBREG_REG (op), final_offset))
5886 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5887 if (SUBREG_PROMOTED_VAR_P (op)
5888 && SUBREG_PROMOTED_SIGN (op) >= 0
5889 && GET_MODE_CLASS (outermode) == MODE_INT
5890 && IN_RANGE (GET_MODE_SIZE (outermode),
5891 GET_MODE_SIZE (innermode),
5892 GET_MODE_SIZE (innermostmode))
5893 && subreg_lowpart_p (newx))
5895 SUBREG_PROMOTED_VAR_P (newx) = 1;
5896 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5898 return newx;
5900 return NULL_RTX;
5903 /* SUBREG of a hard register => just change the register number
5904 and/or mode. If the hard register is not valid in that mode,
5905 suppress this simplification. If the hard register is the stack,
5906 frame, or argument pointer, leave this as a SUBREG. */
5908 if (REG_P (op) && HARD_REGISTER_P (op))
5910 unsigned int regno, final_regno;
5912 regno = REGNO (op);
5913 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5914 if (HARD_REGISTER_NUM_P (final_regno))
5916 rtx x;
5917 int final_offset = byte;
5919 /* Adjust offset for paradoxical subregs. */
5920 if (byte == 0
5921 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5923 int difference = (GET_MODE_SIZE (innermode)
5924 - GET_MODE_SIZE (outermode));
5925 if (WORDS_BIG_ENDIAN)
5926 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5927 if (BYTES_BIG_ENDIAN)
5928 final_offset += difference % UNITS_PER_WORD;
5931 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5933 /* Propagate the original regno. We don't have any way to specify
5934 the offset inside the original regno, so do so only for the lowpart.
5935 The information is used only by alias analysis, which cannot
5936 grok a partial register anyway. */
5938 if (subreg_lowpart_offset (outermode, innermode) == byte)
5939 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5940 return x;
5944 /* If we have a SUBREG of a register that we are replacing and we are
5945 replacing it with a MEM, make a new MEM and try replacing the
5946 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5947 or if we would be widening it. */
5949 if (MEM_P (op)
5950 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5951 /* Allow splitting of volatile memory references in case we don't
5952 have an instruction to move the whole thing. */
5953 && (! MEM_VOLATILE_P (op)
5954 || ! have_insn_for (SET, innermode))
5955 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5956 return adjust_address_nv (op, outermode, byte);
5958 /* Handle complex values represented as a CONCAT
5959 of real and imaginary parts. */
5960 if (GET_CODE (op) == CONCAT)
5962 unsigned int part_size, final_offset;
5963 rtx part, res;
5965 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5966 if (byte < part_size)
5968 part = XEXP (op, 0);
5969 final_offset = byte;
5971 else
5973 part = XEXP (op, 1);
5974 final_offset = byte - part_size;
5977 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5978 return NULL_RTX;
5980 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5981 if (res)
5982 return res;
5983 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5984 return gen_rtx_SUBREG (outermode, part, final_offset);
5985 return NULL_RTX;
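/* The CONCAT case above just decides which half of the complex value
   the byte offset falls into and recurses on that half.  A standalone
   sketch of the selection, assuming both halves have the same size
   (hypothetical helper, not a GCC API): */
#if 0
struct concat_part { int index; unsigned int offset; };

/* Map BYTE, an offset into the whole CONCAT, to the half it addresses
   (0 = XEXP (op, 0), 1 = XEXP (op, 1)) and the offset within it.
   PART_SIZE is the size of one half in bytes.  */
static struct concat_part
pick_concat_part (unsigned int byte, unsigned int part_size)
{
  struct concat_part p;
  p.index = byte < part_size ? 0 : 1;
  p.offset = byte < part_size ? byte : byte - part_size;
  return p;
}

/* E.g. for a complex double (two 8-byte halves), byte 8 addresses the
   imaginary half at offset 0: pick_concat_part (8, 8) == {1, 0}.  */
#endif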
5988 /* A SUBREG resulting from a zero extension may fold to zero if
5989 it extracts higher bits than the ZERO_EXTEND's source provides. */
5990 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5992 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5993 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5994 return CONST0_RTX (outermode);
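/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:HI x)) 4) starts at bit 32 of a value
   zero-extended from 16 bits, so every bit it extracts is zero and the
   whole expression folds to (const_int 0).  */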
5997 if (SCALAR_INT_MODE_P (outermode)
5998 && SCALAR_INT_MODE_P (innermode)
5999 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6000 && byte == subreg_lowpart_offset (outermode, innermode))
6002 rtx tem = simplify_truncation (outermode, op, innermode);
6003 if (tem)
6004 return tem;
6007 return NULL_RTX;
6010 /* Make a SUBREG operation or equivalent if it folds. */
6013 simplify_gen_subreg (machine_mode outermode, rtx op,
6014 machine_mode innermode, unsigned int byte)
6016 rtx newx;
6018 newx = simplify_subreg (outermode, op, innermode, byte);
6019 if (newx)
6020 return newx;
6022 if (GET_CODE (op) == SUBREG
6023 || GET_CODE (op) == CONCAT
6024 || GET_MODE (op) == VOIDmode)
6025 return NULL_RTX;
6027 if (validate_subreg (outermode, innermode, op, byte))
6028 return gen_rtx_SUBREG (outermode, op, byte);
6030 return NULL_RTX;
6033 /* Generate a subreg that extracts the least significant part of EXPR
6034 (in mode INNER_MODE) as a value of mode OUTER_MODE. */
6037 lowpart_subreg (machine_mode outer_mode, rtx expr,
6038 machine_mode inner_mode)
6040 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6041 subreg_lowpart_offset (outer_mode, inner_mode));
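/* A hedged usage sketch (only meaningful inside a GCC pass, with X a
   DImode rtx): take the low SImode part of X, letting simplify_subreg
   fold the SUBREG away when it can.  Kept under "#if 0" because it is
   illustrative, not part of this file: */
#if 0
rtx lo = lowpart_subreg (SImode, x, DImode);
/* ...which is shorthand for:  */
rtx lo2 = simplify_gen_subreg (SImode, x, DImode,
                               subreg_lowpart_offset (SImode, DImode));
#endif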
6044 /* Simplify X, an rtx expression.
6046 Return the simplified expression or NULL if no simplifications
6047 were possible.
6049 This is the preferred entry point into the simplification routines;
6050 however, we still allow passes to call the more specific routines.
6052 Right now GCC has three (yes, three) major bodies of RTL simplification
6053 code that need to be unified.
6055 1. fold_rtx in cse.c. This code uses various CSE specific
6056 information to aid in RTL simplification.
6058 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6059 it uses combine specific information to aid in RTL
6060 simplification.
6062 3. The routines in this file.
6065 Long term we want to only have one body of simplification code; to
6066 get to that state I recommend the following steps:
6068 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6069 which do not depend on pass-specific state into these routines.
6071 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6072 use this routine whenever possible.
6074 3. Allow for pass dependent state to be provided to these
6075 routines and add simplifications based on the pass dependent
6076 state. Remove code from cse.c & combine.c that becomes
6077 redundant/dead.
6079 It will take time, but ultimately the compiler will be easier to
6080 maintain and improve. It's totally silly that when we add a
6081 simplification it needs to be added to 4 places (3 for RTL
6082 simplification and 1 for tree simplification). */
6085 simplify_rtx (const_rtx x)
6087 const enum rtx_code code = GET_CODE (x);
6088 const machine_mode mode = GET_MODE (x);
6090 switch (GET_RTX_CLASS (code))
6092 case RTX_UNARY:
6093 return simplify_unary_operation (code, mode,
6094 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6095 case RTX_COMM_ARITH:
6096 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6097 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6099 /* Fall through.... */
6101 case RTX_BIN_ARITH:
6102 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6104 case RTX_TERNARY:
6105 case RTX_BITFIELD_OPS:
6106 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6107 XEXP (x, 0), XEXP (x, 1),
6108 XEXP (x, 2));
6110 case RTX_COMPARE:
6111 case RTX_COMM_COMPARE:
6112 return simplify_relational_operation (code, mode,
6113 ((GET_MODE (XEXP (x, 0))
6114 != VOIDmode)
6115 ? GET_MODE (XEXP (x, 0))
6116 : GET_MODE (XEXP (x, 1))),
6117 XEXP (x, 0),
6118 XEXP (x, 1));
6120 case RTX_EXTRA:
6121 if (code == SUBREG)
6122 return simplify_subreg (mode, SUBREG_REG (x),
6123 GET_MODE (SUBREG_REG (x)),
6124 SUBREG_BYTE (x));
6125 break;
6127 case RTX_OBJ:
6128 if (code == LO_SUM)
6130 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6131 if (GET_CODE (XEXP (x, 0)) == HIGH
6132 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6133 return XEXP (x, 1);
6135 break;
6137 default:
6138 break;
6140 return NULL;
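/* A hedged usage sketch (only meaningful inside a GCC pass, after RTL
   generation has been initialized): hand the dispatcher above a
   commutative arithmetic rtx and let it canonicalize the operands and
   fold them through simplify_binary_operation.  Kept under "#if 0"
   because it is illustrative, not part of this file: */
#if 0
rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
rtx folded = simplify_rtx (sum);   /* (const_int 5), if it folds.  */
#endif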