gcc/simplify-rtx.c (official-gcc.git)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "hashtab.h"
35 #include "hash-set.h"
36 #include "vec.h"
37 #include "machmode.h"
38 #include "input.h"
39 #include "function.h"
40 #include "insn-codes.h"
41 #include "optabs.h"
42 #include "expr.h"
43 #include "diagnostic-core.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "predict.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
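/* As an illustration: HWI_SIGN_EXTEND (42) is 0, while a low word with
   its most significant bit set (e.g. (HOST_WIDE_INT) -5) yields -1,
   i.e. a high word with every bit set, so (42, 0) and (-5, -1) are the
   sign-extended double-word forms of those values.  */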
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
60 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
61 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
62 unsigned int);
63 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
64 rtx, rtx);
65 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
66 machine_mode, rtx, rtx);
67 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
68 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
69 rtx, rtx, rtx, rtx);
71 /* Negate a CONST_INT rtx, truncating (because a conversion from a
72 maximally negative number can overflow). */
73 static rtx
74 neg_const_int (machine_mode mode, const_rtx i)
76 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
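/* The unsigned negation matters when INTVAL (i) is the most negative
   HOST_WIDE_INT (e.g. a DImode 0x8000000000000000 on a 64-bit host):
   negating it as a signed value would overflow, while the unsigned
   negation wraps and gen_int_mode then yields the same constant, which
   is the correct two's-complement result.  */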
79 /* Test whether expression, X, is an immediate constant that represents
80 the most significant bit of machine mode MODE. */
82 bool
83 mode_signbit_p (machine_mode mode, const_rtx x)
85 unsigned HOST_WIDE_INT val;
86 unsigned int width;
88 if (GET_MODE_CLASS (mode) != MODE_INT)
89 return false;
91 width = GET_MODE_PRECISION (mode);
92 if (width == 0)
93 return false;
95 if (width <= HOST_BITS_PER_WIDE_INT
96 && CONST_INT_P (x))
97 val = INTVAL (x);
98 #if TARGET_SUPPORTS_WIDE_INT
99 else if (CONST_WIDE_INT_P (x))
101 unsigned int i;
102 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
103 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
104 return false;
105 for (i = 0; i < elts - 1; i++)
106 if (CONST_WIDE_INT_ELT (x, i) != 0)
107 return false;
108 val = CONST_WIDE_INT_ELT (x, elts - 1);
109 width %= HOST_BITS_PER_WIDE_INT;
110 if (width == 0)
111 width = HOST_BITS_PER_WIDE_INT;
113 #else
114 else if (width <= HOST_BITS_PER_DOUBLE_INT
115 && CONST_DOUBLE_AS_INT_P (x)
116 && CONST_DOUBLE_LOW (x) == 0)
118 val = CONST_DOUBLE_HIGH (x);
119 width -= HOST_BITS_PER_WIDE_INT;
121 #endif
122 else
123 /* X is not an integer constant. */
124 return false;
126 if (width < HOST_BITS_PER_WIDE_INT)
127 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
128 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
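/* For example, in SImode only a constant whose low 32 bits are
   0x80000000 satisfies mode_signbit_p; values such as 0x40000000 (sign
   bit clear) or 0xc0000000 (an additional bit set) do not.  */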
131 /* Test whether VAL is equal to the most significant bit of mode MODE
132 (after masking with the mode mask of MODE). Returns false if the
133 precision of MODE is too large to handle. */
135 bool
136 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
138 unsigned int width;
140 if (GET_MODE_CLASS (mode) != MODE_INT)
141 return false;
143 width = GET_MODE_PRECISION (mode);
144 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
145 return false;
147 val &= GET_MODE_MASK (mode);
148 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
151 /* Test whether the most significant bit of mode MODE is set in VAL.
152 Returns false if the precision of MODE is too large to handle. */
153 bool
154 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
156 unsigned int width;
158 if (GET_MODE_CLASS (mode) != MODE_INT)
159 return false;
161 width = GET_MODE_PRECISION (mode);
162 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
163 return false;
165 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
166 return val != 0;
169 /* Test whether the most significant bit of mode MODE is clear in VAL.
170 Returns false if the precision of MODE is too large to handle. */
171 bool
172 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
174 unsigned int width;
176 if (GET_MODE_CLASS (mode) != MODE_INT)
177 return false;
179 width = GET_MODE_PRECISION (mode);
180 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
181 return false;
183 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
184 return val == 0;
187 /* Make a binary operation by properly ordering the operands and
188 seeing if the expression folds. */
191 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
192 rtx op1)
194 rtx tem;
196 /* If this simplifies, do it. */
197 tem = simplify_binary_operation (code, mode, op0, op1);
198 if (tem)
199 return tem;
201 /* Put complex operands first and constants second if commutative. */
202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
203 && swap_commutative_operands_p (op0, op1))
204 tem = op0, op0 = op1, op1 = tem;
206 return gen_rtx_fmt_ee (code, mode, op0, op1);
209 /* If X is a MEM referencing the constant pool, return the real value.
210 Otherwise return X. */
212 avoid_constant_pool_reference (rtx x)
214 rtx c, tmp, addr;
215 machine_mode cmode;
216 HOST_WIDE_INT offset = 0;
218 switch (GET_CODE (x))
220 case MEM:
221 break;
223 case FLOAT_EXTEND:
224 /* Handle float extensions of constant pool references. */
225 tmp = XEXP (x, 0);
226 c = avoid_constant_pool_reference (tmp);
227 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
229 REAL_VALUE_TYPE d;
231 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
232 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
234 return x;
236 default:
237 return x;
240 if (GET_MODE (x) == BLKmode)
241 return x;
243 addr = XEXP (x, 0);
245 /* Call target hook to avoid the effects of -fpic etc.... */
246 addr = targetm.delegitimize_address (addr);
248 /* Split the address into a base and integer offset. */
249 if (GET_CODE (addr) == CONST
250 && GET_CODE (XEXP (addr, 0)) == PLUS
251 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
253 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
254 addr = XEXP (XEXP (addr, 0), 0);
257 if (GET_CODE (addr) == LO_SUM)
258 addr = XEXP (addr, 1);
260 /* If this is a constant pool reference, we can turn it into its
261 constant and hope that simplifications happen. */
262 if (GET_CODE (addr) == SYMBOL_REF
263 && CONSTANT_POOL_ADDRESS_P (addr))
265 c = get_pool_constant (addr);
266 cmode = get_pool_mode (addr);
268 /* If we're accessing the constant in a different mode than it was
269 originally stored, attempt to fix that up via subreg simplifications.
270 If that fails we have no choice but to return the original memory. */
271 if ((offset != 0 || cmode != GET_MODE (x))
272 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
274 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
275 if (tem && CONSTANT_P (tem))
276 return tem;
278 else
279 return c;
282 return x;
285 /* Simplify a MEM based on its attributes. This is the default
286 delegitimize_address target hook, and it's recommended that every
287 overrider call it. */
290 delegitimize_mem_from_attrs (rtx x)
292 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
293 use their base addresses as equivalent. */
294 if (MEM_P (x)
295 && MEM_EXPR (x)
296 && MEM_OFFSET_KNOWN_P (x))
298 tree decl = MEM_EXPR (x);
299 machine_mode mode = GET_MODE (x);
300 HOST_WIDE_INT offset = 0;
302 switch (TREE_CODE (decl))
304 default:
305 decl = NULL;
306 break;
308 case VAR_DECL:
309 break;
311 case ARRAY_REF:
312 case ARRAY_RANGE_REF:
313 case COMPONENT_REF:
314 case BIT_FIELD_REF:
315 case REALPART_EXPR:
316 case IMAGPART_EXPR:
317 case VIEW_CONVERT_EXPR:
319 HOST_WIDE_INT bitsize, bitpos;
320 tree toffset;
321 int unsignedp, volatilep = 0;
323 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
324 &mode, &unsignedp, &volatilep, false);
325 if (bitsize != GET_MODE_BITSIZE (mode)
326 || (bitpos % BITS_PER_UNIT)
327 || (toffset && !tree_fits_shwi_p (toffset)))
328 decl = NULL;
329 else
331 offset += bitpos / BITS_PER_UNIT;
332 if (toffset)
333 offset += tree_to_shwi (toffset);
335 break;
339 if (decl
340 && mode == GET_MODE (x)
341 && TREE_CODE (decl) == VAR_DECL
342 && (TREE_STATIC (decl)
343 || DECL_THREAD_LOCAL_P (decl))
344 && DECL_RTL_SET_P (decl)
345 && MEM_P (DECL_RTL (decl)))
347 rtx newx;
349 offset += MEM_OFFSET (x);
351 newx = DECL_RTL (decl);
353 if (MEM_P (newx))
355 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
357 /* Avoid creating a new MEM needlessly if we already had
358 the same address. We do so if there's no OFFSET and the
359 old address X is identical to NEWX, or if X is of the
360 form (plus NEWX OFFSET), or the NEWX is of the form
361 (plus Y (const_int Z)) and X is that with the offset
362 added: (plus Y (const_int Z+OFFSET)). */
363 if (!((offset == 0
364 || (GET_CODE (o) == PLUS
365 && GET_CODE (XEXP (o, 1)) == CONST_INT
366 && (offset == INTVAL (XEXP (o, 1))
367 || (GET_CODE (n) == PLUS
368 && GET_CODE (XEXP (n, 1)) == CONST_INT
369 && (INTVAL (XEXP (n, 1)) + offset
370 == INTVAL (XEXP (o, 1)))
371 && (n = XEXP (n, 0))))
372 && (o = XEXP (o, 0))))
373 && rtx_equal_p (o, n)))
374 x = adjust_address_nv (newx, mode, offset);
376 else if (GET_MODE (x) == GET_MODE (newx)
377 && offset == 0)
378 x = newx;
382 return x;
385 /* Make a unary operation by first seeing if it folds and otherwise making
386 the specified operation. */
389 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
390 machine_mode op_mode)
392 rtx tem;
394 /* If this simplifies, use it. */
395 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
396 return tem;
398 return gen_rtx_fmt_e (code, mode, op);
401 /* Likewise for ternary operations. */
404 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
405 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
407 rtx tem;
409 /* If this simplifies, use it. */
410 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
411 op0, op1, op2)))
412 return tem;
414 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
417 /* Likewise, for relational operations.
418 CMP_MODE specifies mode comparison is done in. */
421 simplify_gen_relational (enum rtx_code code, machine_mode mode,
422 machine_mode cmp_mode, rtx op0, rtx op1)
424 rtx tem;
426 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
427 op0, op1)))
428 return tem;
430 return gen_rtx_fmt_ee (code, mode, op0, op1);
433 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
434 and simplify the result. If FN is non-NULL, call this callback on each
435 X; if it returns non-NULL, replace X with its return value and simplify the
436 result. */
439 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
440 rtx (*fn) (rtx, const_rtx, void *), void *data)
442 enum rtx_code code = GET_CODE (x);
443 machine_mode mode = GET_MODE (x);
444 machine_mode op_mode;
445 const char *fmt;
446 rtx op0, op1, op2, newx, op;
447 rtvec vec, newvec;
448 int i, j;
450 if (__builtin_expect (fn != NULL, 0))
452 newx = fn (x, old_rtx, data);
453 if (newx)
454 return newx;
456 else if (rtx_equal_p (x, old_rtx))
457 return copy_rtx ((rtx) data);
459 switch (GET_RTX_CLASS (code))
461 case RTX_UNARY:
462 op0 = XEXP (x, 0);
463 op_mode = GET_MODE (op0);
464 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
465 if (op0 == XEXP (x, 0))
466 return x;
467 return simplify_gen_unary (code, mode, op0, op_mode);
469 case RTX_BIN_ARITH:
470 case RTX_COMM_ARITH:
471 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_binary (code, mode, op0, op1);
477 case RTX_COMPARE:
478 case RTX_COMM_COMPARE:
479 op0 = XEXP (x, 0);
480 op1 = XEXP (x, 1);
481 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
482 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
483 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
485 return x;
486 return simplify_gen_relational (code, mode, op_mode, op0, op1);
488 case RTX_TERNARY:
489 case RTX_BITFIELD_OPS:
490 op0 = XEXP (x, 0);
491 op_mode = GET_MODE (op0);
492 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
493 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
494 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
495 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
496 return x;
497 if (op_mode == VOIDmode)
498 op_mode = GET_MODE (op0);
499 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
501 case RTX_EXTRA:
502 if (code == SUBREG)
504 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
505 if (op0 == SUBREG_REG (x))
506 return x;
507 op0 = simplify_gen_subreg (GET_MODE (x), op0,
508 GET_MODE (SUBREG_REG (x)),
509 SUBREG_BYTE (x));
510 return op0 ? op0 : x;
512 break;
514 case RTX_OBJ:
515 if (code == MEM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 if (op0 == XEXP (x, 0))
519 return x;
520 return replace_equiv_address_nv (x, op0);
522 else if (code == LO_SUM)
524 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
525 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
527 /* (lo_sum (high x) x) -> x */
528 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
529 return op1;
531 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
532 return x;
533 return gen_rtx_LO_SUM (mode, op0, op1);
535 break;
537 default:
538 break;
541 newx = x;
542 fmt = GET_RTX_FORMAT (code);
543 for (i = 0; fmt[i]; i++)
544 switch (fmt[i])
546 case 'E':
547 vec = XVEC (x, i);
548 newvec = XVEC (newx, i);
549 for (j = 0; j < GET_NUM_ELEM (vec); j++)
551 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
552 old_rtx, fn, data);
553 if (op != RTVEC_ELT (vec, j))
555 if (newvec == vec)
557 newvec = shallow_copy_rtvec (vec);
558 if (x == newx)
559 newx = shallow_copy_rtx (x);
560 XVEC (newx, i) = newvec;
562 RTVEC_ELT (newvec, j) = op;
565 break;
567 case 'e':
568 if (XEXP (x, i))
570 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
571 if (op != XEXP (x, i))
573 if (x == newx)
574 newx = shallow_copy_rtx (x);
575 XEXP (newx, i) = op;
578 break;
580 return newx;
583 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
584 resulting RTX. Return a new RTX which is as simplified as possible. */
587 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
589 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
592 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
593 Only handle cases where the truncated value is inherently an rvalue.
595 RTL provides two ways of truncating a value:
597 1. a lowpart subreg. This form is only a truncation when both
598 the outer and inner modes (here MODE and OP_MODE respectively)
599 are scalar integers, and only then when the subreg is used as
600 an rvalue.
602 It is only valid to form such truncating subregs if the
603 truncation requires no action by the target. The onus for
604 proving this is on the creator of the subreg -- e.g. the
605 caller to simplify_subreg or simplify_gen_subreg -- and typically
606 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
608 2. a TRUNCATE. This form handles both scalar and compound integers.
610 The first form is preferred where valid. However, the TRUNCATE
611 handling in simplify_unary_operation turns the second form into the
612 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
613 so it is generally safe to form rvalue truncations using:
615 simplify_gen_unary (TRUNCATE, ...)
617 and leave simplify_unary_operation to work out which representation
618 should be used.
620 Because of the proof requirements on (1), simplify_truncation must
621 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
622 regardless of whether the outer truncation came from a SUBREG or a
623 TRUNCATE. For example, if the caller has proven that an SImode
624 truncation of:
626 (and:DI X Y)
628 is a no-op and can be represented as a subreg, it does not follow
629 that SImode truncations of X and Y are also no-ops. On a target
630 like 64-bit MIPS that requires SImode values to be stored in
631 sign-extended form, an SImode truncation of:
633 (and:DI (reg:DI X) (const_int 63))
635 is trivially a no-op because only the lower 6 bits can be set.
636 However, X is still an arbitrary 64-bit number and so we cannot
637 assume that truncating it too is a no-op. */
639 static rtx
640 simplify_truncation (machine_mode mode, rtx op,
641 machine_mode op_mode)
643 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
644 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
645 gcc_assert (precision <= op_precision);
647 /* Optimize truncations of zero and sign extended values. */
648 if (GET_CODE (op) == ZERO_EXTEND
649 || GET_CODE (op) == SIGN_EXTEND)
651 /* There are three possibilities. If MODE is the same as the
652 origmode, we can omit both the extension and the subreg.
653 If MODE is not larger than the origmode, we can apply the
654 truncation without the extension. Finally, if the outermode
655 is larger than the origmode, we can just extend to the appropriate
656 mode. */
657 machine_mode origmode = GET_MODE (XEXP (op, 0));
658 if (mode == origmode)
659 return XEXP (op, 0);
660 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
661 return simplify_gen_unary (TRUNCATE, mode,
662 XEXP (op, 0), origmode);
663 else
664 return simplify_gen_unary (GET_CODE (op), mode,
665 XEXP (op, 0), origmode);
668 /* If the machine can perform operations in the truncated mode, distribute
669 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
670 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
671 if (1
672 #ifdef WORD_REGISTER_OPERATIONS
673 && precision >= BITS_PER_WORD
674 #endif
675 && (GET_CODE (op) == PLUS
676 || GET_CODE (op) == MINUS
677 || GET_CODE (op) == MULT))
679 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
680 if (op0)
682 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
683 if (op1)
684 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
688 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
689 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op) == LSHIFTRT
692 || GET_CODE (op) == ASHIFTRT)
693 /* Ensure that OP_MODE is at least twice as wide as MODE
694 to avoid the possibility that an outer LSHIFTRT shifts by more
695 than the sign extension's sign_bit_copies and introduces zeros
696 into the high bits of the result. */
697 && 2 * precision <= op_precision
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (ASHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
706 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if ((GET_CODE (op) == LSHIFTRT
709 || GET_CODE (op) == ASHIFTRT)
710 && CONST_INT_P (XEXP (op, 1))
711 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (LSHIFTRT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
718 (ashift:QI (x:QI) C), where C is a suitable small constant and
719 the outer subreg is effectively a truncation to the original mode. */
720 if (GET_CODE (op) == ASHIFT
721 && CONST_INT_P (XEXP (op, 1))
722 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
723 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
725 && UINTVAL (XEXP (op, 1)) < precision)
726 return simplify_gen_binary (ASHIFT, mode,
727 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
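/* As a concrete instance of the shift cases above:
   (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) (const_int 3)))
   becomes (ashift:QI (x:QI) (const_int 3)), since the low 8 bits of
   the shifted value depend only on the low 8 bits of the operand.  */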
729 /* Recognize a word extraction from a multi-word subreg. */
730 if ((GET_CODE (op) == LSHIFTRT
731 || GET_CODE (op) == ASHIFTRT)
732 && SCALAR_INT_MODE_P (mode)
733 && SCALAR_INT_MODE_P (op_mode)
734 && precision >= BITS_PER_WORD
735 && 2 * precision <= op_precision
736 && CONST_INT_P (XEXP (op, 1))
737 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
738 && UINTVAL (XEXP (op, 1)) < op_precision)
740 int byte = subreg_lowpart_offset (mode, op_mode);
741 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
742 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
743 (WORDS_BIG_ENDIAN
744 ? byte - shifted_bytes
745 : byte + shifted_bytes));
748 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
749 and try replacing the TRUNCATE and shift with it. Don't do this
750 if the MEM has a mode-dependent address. */
751 if ((GET_CODE (op) == LSHIFTRT
752 || GET_CODE (op) == ASHIFTRT)
753 && SCALAR_INT_MODE_P (op_mode)
754 && MEM_P (XEXP (op, 0))
755 && CONST_INT_P (XEXP (op, 1))
756 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
757 && INTVAL (XEXP (op, 1)) > 0
758 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
759 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
760 MEM_ADDR_SPACE (XEXP (op, 0)))
761 && ! MEM_VOLATILE_P (XEXP (op, 0))
762 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
763 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
765 int byte = subreg_lowpart_offset (mode, op_mode);
766 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
767 return adjust_address_nv (XEXP (op, 0), mode,
768 (WORDS_BIG_ENDIAN
769 ? byte - shifted_bytes
770 : byte + shifted_bytes));
773 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
774 (OP:SI foo:SI) if OP is NEG or ABS. */
775 if ((GET_CODE (op) == ABS
776 || GET_CODE (op) == NEG)
777 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
778 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
779 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
780 return simplify_gen_unary (GET_CODE (op), mode,
781 XEXP (XEXP (op, 0), 0), mode);
783 /* (truncate:A (subreg:B (truncate:C X) 0)) is
784 (truncate:A X). */
785 if (GET_CODE (op) == SUBREG
786 && SCALAR_INT_MODE_P (mode)
787 && SCALAR_INT_MODE_P (op_mode)
788 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
789 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
790 && subreg_lowpart_p (op))
792 rtx inner = XEXP (SUBREG_REG (op), 0);
793 if (GET_MODE_PRECISION (mode)
794 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
795 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
796 else
797 /* If subreg above is paradoxical and C is narrower
798 than A, return (subreg:A (truncate:C X) 0). */
799 return simplify_gen_subreg (mode, SUBREG_REG (op),
800 GET_MODE (SUBREG_REG (op)), 0);
803 /* (truncate:A (truncate:B X)) is (truncate:A X). */
804 if (GET_CODE (op) == TRUNCATE)
805 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
806 GET_MODE (XEXP (op, 0)));
808 return NULL_RTX;
811 /* Try to simplify a unary operation CODE whose output mode is to be
812 MODE with input operand OP whose mode was originally OP_MODE.
813 Return zero if no simplification can be made. */
815 simplify_unary_operation (enum rtx_code code, machine_mode mode,
816 rtx op, machine_mode op_mode)
818 rtx trueop, tem;
820 trueop = avoid_constant_pool_reference (op);
822 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
823 if (tem)
824 return tem;
826 return simplify_unary_operation_1 (code, mode, op);
829 /* Perform some simplifications we can do even if the operands
830 aren't constant. */
831 static rtx
832 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
834 enum rtx_code reversed;
835 rtx temp;
837 switch (code)
839 case NOT:
840 /* (not (not X)) == X. */
841 if (GET_CODE (op) == NOT)
842 return XEXP (op, 0);
844 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
845 comparison is all ones. */
846 if (COMPARISON_P (op)
847 && (mode == BImode || STORE_FLAG_VALUE == -1)
848 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
849 return simplify_gen_relational (reversed, mode, VOIDmode,
850 XEXP (op, 0), XEXP (op, 1));
852 /* (not (plus X -1)) can become (neg X). */
853 if (GET_CODE (op) == PLUS
854 && XEXP (op, 1) == constm1_rtx)
855 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
857 /* Similarly, (not (neg X)) is (plus X -1). */
858 if (GET_CODE (op) == NEG)
859 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
860 CONSTM1_RTX (mode));
862 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
863 if (GET_CODE (op) == XOR
864 && CONST_INT_P (XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
869 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
870 if (GET_CODE (op) == PLUS
871 && CONST_INT_P (XEXP (op, 1))
872 && mode_signbit_p (mode, XEXP (op, 1))
873 && (temp = simplify_unary_operation (NOT, mode,
874 XEXP (op, 1), mode)) != 0)
875 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
878 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
879 operands other than 1, but that is not valid. We could do a
880 similar simplification for (not (lshiftrt C X)) where C is
881 just the sign bit, but this doesn't seem common enough to
882 bother with. */
883 if (GET_CODE (op) == ASHIFT
884 && XEXP (op, 0) == const1_rtx)
886 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
887 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
890 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
891 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
892 so we can perform the above simplification. */
893 if (STORE_FLAG_VALUE == -1
894 && GET_CODE (op) == ASHIFTRT
895 && CONST_INT_P (XEXP (op, 1))
896 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
897 return simplify_gen_relational (GE, mode, VOIDmode,
898 XEXP (op, 0), const0_rtx);
901 if (GET_CODE (op) == SUBREG
902 && subreg_lowpart_p (op)
903 && (GET_MODE_SIZE (GET_MODE (op))
904 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
905 && GET_CODE (SUBREG_REG (op)) == ASHIFT
906 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
908 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
909 rtx x;
911 x = gen_rtx_ROTATE (inner_mode,
912 simplify_gen_unary (NOT, inner_mode, const1_rtx,
913 inner_mode),
914 XEXP (SUBREG_REG (op), 1));
915 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
916 if (temp)
917 return temp;
920 /* Apply De Morgan's laws to reduce number of patterns for machines
921 with negating logical insns (and-not, nand, etc.). If result has
922 only one NOT, put it first, since that is how the patterns are
923 coded. */
924 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
926 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
927 machine_mode op_mode;
929 op_mode = GET_MODE (in1);
930 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
932 op_mode = GET_MODE (in2);
933 if (op_mode == VOIDmode)
934 op_mode = mode;
935 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
937 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
939 rtx tem = in2;
940 in2 = in1; in1 = tem;
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
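/* For instance, (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)); the swap above ensures
   that when only one operand remains a NOT, it ends up first.  */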
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
953 break;
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op) == PLUS
962 && XEXP (op, 1) == const1_rtx)
963 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op) == NOT)
967 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
968 CONST1_RTX (mode));
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
980 if (GET_CODE (op) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (CONST_SCALAR_INT_P (XEXP (op, 1))
986 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
988 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
989 if (temp)
990 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
993 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
994 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
995 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
998 /* (neg (mult A B)) becomes (mult A (neg B)).
999 This works even for floating-point values. */
1000 if (GET_CODE (op) == MULT
1001 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1003 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1004 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1007 /* NEG commutes with ASHIFT since it is multiplication. Only do
1008 this if we can then eliminate the NEG (e.g., if the operand
1009 is a constant). */
1010 if (GET_CODE (op) == ASHIFT)
1012 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && CONST_INT_P (XEXP (op, 1))
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1033 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1034 if (GET_CODE (op) == XOR
1035 && XEXP (op, 1) == const1_rtx
1036 && nonzero_bits (XEXP (op, 0), mode) == 1)
1037 return plus_constant (mode, XEXP (op, 0), -1);
1039 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1040 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1041 if (GET_CODE (op) == LT
1042 && XEXP (op, 1) == const0_rtx
1043 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1045 machine_mode inner = GET_MODE (XEXP (op, 0));
1046 int isize = GET_MODE_PRECISION (inner);
1047 if (STORE_FLAG_VALUE == 1)
1049 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1050 GEN_INT (isize - 1));
1051 if (mode == inner)
1052 return temp;
1053 if (GET_MODE_PRECISION (mode) > isize)
1054 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1055 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1057 else if (STORE_FLAG_VALUE == -1)
1059 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1060 GEN_INT (isize - 1));
1061 if (mode == inner)
1062 return temp;
1063 if (GET_MODE_PRECISION (mode) > isize)
1064 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1065 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
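/* The reasoning: when X is negative, (ashiftrt X (isize - 1)) is all
   ones, i.e. -1, matching (neg (lt X 0)) = (neg 1) when
   STORE_FLAG_VALUE is 1; when X is non-negative both sides are 0.
   Likewise the LSHIFTRT form yields 1 for negative X, matching
   (neg -1) when STORE_FLAG_VALUE is -1.  */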
1068 break;
1070 case TRUNCATE:
1071 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1072 with the umulXi3_highpart patterns. */
1073 if (GET_CODE (op) == LSHIFTRT
1074 && GET_CODE (XEXP (op, 0)) == MULT)
1075 break;
1077 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1079 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1081 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1082 if (temp)
1083 return temp;
1085 /* We can't handle truncation to a partial integer mode here
1086 because we don't know the real bitsize of the partial
1087 integer mode. */
1088 break;
1091 if (GET_MODE (op) != VOIDmode)
1093 temp = simplify_truncation (mode, op, GET_MODE (op));
1094 if (temp)
1095 return temp;
1098 /* If we know that the value is already truncated, we can
1099 replace the TRUNCATE with a SUBREG. */
1100 if (GET_MODE_NUNITS (mode) == 1
1101 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1102 || truncated_to_mode (mode, op)))
1104 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 if (temp)
1106 return temp;
1109 /* A truncate of a comparison can be replaced with a subreg if
1110 STORE_FLAG_VALUE permits. This is like the previous test,
1111 but it works even if the comparison is done in a mode larger
1112 than HOST_BITS_PER_WIDE_INT. */
1113 if (HWI_COMPUTABLE_MODE_P (mode)
1114 && COMPARISON_P (op)
1115 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1117 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1118 if (temp)
1119 return temp;
1122 /* A truncate of a memory is just loading the low part of the memory
1123 if we are not changing the meaning of the address. */
1124 if (GET_CODE (op) == MEM
1125 && !VECTOR_MODE_P (mode)
1126 && !MEM_VOLATILE_P (op)
1127 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1134 break;
1136 case FLOAT_TRUNCATE:
1137 if (DECIMAL_FLOAT_MODE_P (mode))
1138 break;
1140 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1141 if (GET_CODE (op) == FLOAT_EXTEND
1142 && GET_MODE (XEXP (op, 0)) == mode)
1143 return XEXP (op, 0);
1145 /* (float_truncate:SF (float_truncate:DF foo:XF))
1146 = (float_truncate:SF foo:XF).
1147 This may eliminate double rounding, so it is unsafe.
1149 (float_truncate:SF (float_extend:XF foo:DF))
1150 = (float_truncate:SF foo:DF).
1152 (float_truncate:DF (float_extend:XF foo:SF))
1153 = (float_extend:DF foo:SF). */
1154 if ((GET_CODE (op) == FLOAT_TRUNCATE
1155 && flag_unsafe_math_optimizations)
1156 || GET_CODE (op) == FLOAT_EXTEND)
1157 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1158 0)))
1159 > GET_MODE_SIZE (mode)
1160 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1161 mode,
1162 XEXP (op, 0), mode);
1164 /* (float_truncate (float x)) is (float x) */
1165 if (GET_CODE (op) == FLOAT
1166 && (flag_unsafe_math_optimizations
1167 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1168 && ((unsigned)significand_size (GET_MODE (op))
1169 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1170 - num_sign_bit_copies (XEXP (op, 0),
1171 GET_MODE (XEXP (op, 0))))))))
1172 return simplify_gen_unary (FLOAT, mode,
1173 XEXP (op, 0),
1174 GET_MODE (XEXP (op, 0)));
1176 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1177 (OP:SF foo:SF) if OP is NEG or ABS. */
1178 if ((GET_CODE (op) == ABS
1179 || GET_CODE (op) == NEG)
1180 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1181 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1182 return simplify_gen_unary (GET_CODE (op), mode,
1183 XEXP (XEXP (op, 0), 0), mode);
1185 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1186 is (float_truncate:SF x). */
1187 if (GET_CODE (op) == SUBREG
1188 && subreg_lowpart_p (op)
1189 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1190 return SUBREG_REG (op);
1191 break;
1193 case FLOAT_EXTEND:
1194 if (DECIMAL_FLOAT_MODE_P (mode))
1195 break;
1197 /* (float_extend (float_extend x)) is (float_extend x)
1199 (float_extend (float x)) is (float x) assuming that double
1200 rounding can't happen. */
1202 if (GET_CODE (op) == FLOAT_EXTEND
1203 || (GET_CODE (op) == FLOAT
1204 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1205 && ((unsigned)significand_size (GET_MODE (op))
1206 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1207 - num_sign_bit_copies (XEXP (op, 0),
1208 GET_MODE (XEXP (op, 0)))))))
1209 return simplify_gen_unary (GET_CODE (op), mode,
1210 XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1213 break;
1215 case ABS:
1216 /* (abs (neg <foo>)) -> (abs <foo>) */
1217 if (GET_CODE (op) == NEG)
1218 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1221 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1222 do nothing. */
1223 if (GET_MODE (op) == VOIDmode)
1224 break;
1226 /* If operand is something known to be positive, ignore the ABS. */
1227 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1228 || val_signbit_known_clear_p (GET_MODE (op),
1229 nonzero_bits (op, GET_MODE (op))))
1230 return op;
1232 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1233 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1234 return gen_rtx_NEG (mode, op);
1236 break;
1238 case FFS:
1239 /* (ffs (*_extend <X>)) = (ffs <X>) */
1240 if (GET_CODE (op) == SIGN_EXTEND
1241 || GET_CODE (op) == ZERO_EXTEND)
1242 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1244 break;
1246 case POPCOUNT:
1247 switch (GET_CODE (op))
1249 case BSWAP:
1250 case ZERO_EXTEND:
1251 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1252 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1255 case ROTATE:
1256 case ROTATERT:
1257 /* Rotations don't affect popcount. */
1258 if (!side_effects_p (XEXP (op, 1)))
1259 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1260 GET_MODE (XEXP (op, 0)));
1261 break;
1263 default:
1264 break;
1266 break;
1268 case PARITY:
1269 switch (GET_CODE (op))
1271 case NOT:
1272 case BSWAP:
1273 case ZERO_EXTEND:
1274 case SIGN_EXTEND:
1275 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 case ROTATE:
1279 case ROTATERT:
1280 /* Rotations don't affect parity. */
1281 if (!side_effects_p (XEXP (op, 1)))
1282 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1286 default:
1287 break;
1289 break;
1291 case BSWAP:
1292 /* (bswap (bswap x)) -> x. */
1293 if (GET_CODE (op) == BSWAP)
1294 return XEXP (op, 0);
1295 break;
1297 case FLOAT:
1298 /* (float (sign_extend <X>)) = (float <X>). */
1299 if (GET_CODE (op) == SIGN_EXTEND)
1300 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1301 GET_MODE (XEXP (op, 0)));
1302 break;
1304 case SIGN_EXTEND:
1305 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1306 becomes just the MINUS if its mode is MODE. This allows
1307 folding switch statements on machines using casesi (such as
1308 the VAX). */
1309 if (GET_CODE (op) == TRUNCATE
1310 && GET_MODE (XEXP (op, 0)) == mode
1311 && GET_CODE (XEXP (op, 0)) == MINUS
1312 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1313 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1314 return XEXP (op, 0);
1316 /* Extending a widening multiplication should be canonicalized to
1317 a wider widening multiplication. */
1318 if (GET_CODE (op) == MULT)
1320 rtx lhs = XEXP (op, 0);
1321 rtx rhs = XEXP (op, 1);
1322 enum rtx_code lcode = GET_CODE (lhs);
1323 enum rtx_code rcode = GET_CODE (rhs);
1325 /* Widening multiplies usually extend both operands, but sometimes
1326 they use a shift to extract a portion of a register. */
1327 if ((lcode == SIGN_EXTEND
1328 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1329 && (rcode == SIGN_EXTEND
1330 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1332 machine_mode lmode = GET_MODE (lhs);
1333 machine_mode rmode = GET_MODE (rhs);
1334 int bits;
1336 if (lcode == ASHIFTRT)
1337 /* Number of bits not shifted off the end. */
1338 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1339 else /* lcode == SIGN_EXTEND */
1340 /* Size of inner mode. */
1341 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1343 if (rcode == ASHIFTRT)
1344 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1345 else /* rcode == SIGN_EXTEND */
1346 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1348 /* We can only widen multiplies if the result is mathematically
1349 equivalent. I.e. if overflow was impossible. */
1350 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1351 return simplify_gen_binary
1352 (MULT, mode,
1353 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1354 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
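/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (x:HI))
   (sign_extend:SI (y:HI)))) needs at most 16 + 16 = 32 bits for the
   product, so it is rewritten as the wider widening multiply
   (mult:DI (sign_extend:DI (x:HI)) (sign_extend:DI (y:HI))).  */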
1358 /* Check for a sign extension of a subreg of a promoted
1359 variable, where the promotion is sign-extended, and the
1360 target mode is the same as the variable's promotion. */
1361 if (GET_CODE (op) == SUBREG
1362 && SUBREG_PROMOTED_VAR_P (op)
1363 && SUBREG_PROMOTED_SIGNED_P (op)
1364 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1366 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1367 if (temp)
1368 return temp;
1371 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1372 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1373 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1375 gcc_assert (GET_MODE_PRECISION (mode)
1376 > GET_MODE_PRECISION (GET_MODE (op)));
1377 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1381 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1382 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1383 GET_MODE_BITSIZE (N) - I bits.
1384 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1385 is similarly (zero_extend:M (subreg:O <X>)). */
1386 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1387 && GET_CODE (XEXP (op, 0)) == ASHIFT
1388 && CONST_INT_P (XEXP (op, 1))
1389 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1390 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1392 machine_mode tmode
1393 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1394 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1395 gcc_assert (GET_MODE_BITSIZE (mode)
1396 > GET_MODE_BITSIZE (GET_MODE (op)));
1397 if (tmode != BLKmode)
1399 rtx inner =
1400 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1401 if (inner)
1402 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1403 ? SIGN_EXTEND : ZERO_EXTEND,
1404 mode, inner, tmode);
1408 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1409 /* As we do not know which address space the pointer is referring to,
1410 we can do this only if the target does not support different pointer
1411 or address modes depending on the address space. */
1412 if (target_default_pointer_address_modes_p ()
1413 && ! POINTERS_EXTEND_UNSIGNED
1414 && mode == Pmode && GET_MODE (op) == ptr_mode
1415 && (CONSTANT_P (op)
1416 || (GET_CODE (op) == SUBREG
1417 && REG_P (SUBREG_REG (op))
1418 && REG_POINTER (SUBREG_REG (op))
1419 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1420 return convert_memory_address (Pmode, op);
1421 #endif
1422 break;
1424 case ZERO_EXTEND:
1425 /* Check for a zero extension of a subreg of a promoted
1426 variable, where the promotion is zero-extended, and the
1427 target mode is the same as the variable's promotion. */
1428 if (GET_CODE (op) == SUBREG
1429 && SUBREG_PROMOTED_VAR_P (op)
1430 && SUBREG_PROMOTED_UNSIGNED_P (op)
1431 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1433 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1434 if (temp)
1435 return temp;
1438 /* Extending a widening multiplication should be canonicalized to
1439 a wider widening multiplication. */
1440 if (GET_CODE (op) == MULT)
1442 rtx lhs = XEXP (op, 0);
1443 rtx rhs = XEXP (op, 1);
1444 enum rtx_code lcode = GET_CODE (lhs);
1445 enum rtx_code rcode = GET_CODE (rhs);
1447 /* Widening multiplies usually extend both operands, but sometimes
1448 they use a shift to extract a portion of a register. */
1449 if ((lcode == ZERO_EXTEND
1450 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1451 && (rcode == ZERO_EXTEND
1452 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1454 machine_mode lmode = GET_MODE (lhs);
1455 machine_mode rmode = GET_MODE (rhs);
1456 int bits;
1458 if (lcode == LSHIFTRT)
1459 /* Number of bits not shifted off the end. */
1460 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1461 else /* lcode == ZERO_EXTEND */
1462 /* Size of inner mode. */
1463 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1465 if (rcode == LSHIFTRT)
1466 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1467 else /* rcode == ZERO_EXTEND */
1468 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1470 /* We can only widen multiplies if the result is mathematically
1471 equivalent. I.e. if overflow was impossible. */
1472 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1473 return simplify_gen_binary
1474 (MULT, mode,
1475 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1476 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
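/* Likewise for the unsigned case: (zero_extend:DI (mult:SI
   (zero_extend:SI (x:HI)) (zero_extend:SI (y:HI)))) becomes
   (mult:DI (zero_extend:DI (x:HI)) (zero_extend:DI (y:HI))), since the
   32-bit product of two 16-bit unsigned values cannot overflow.  */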
1480 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op) == ZERO_EXTEND)
1482 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1483 GET_MODE (XEXP (op, 0)));
1485 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1487 GET_MODE_PRECISION (N) - I bits. */
1488 if (GET_CODE (op) == LSHIFTRT
1489 && GET_CODE (XEXP (op, 0)) == ASHIFT
1490 && CONST_INT_P (XEXP (op, 1))
1491 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1492 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1494 machine_mode tmode
1495 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1496 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1497 if (tmode != BLKmode)
1499 rtx inner =
1500 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1501 if (inner)
1502 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1506 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1507 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1508 of mode N. E.g.
1509 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1510 (and:SI (reg:SI) (const_int 63)). */
1511 if (GET_CODE (op) == SUBREG
1512 && GET_MODE_PRECISION (GET_MODE (op))
1513 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1514 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1515 <= HOST_BITS_PER_WIDE_INT
1516 && GET_MODE_PRECISION (mode)
1517 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1518 && subreg_lowpart_p (op)
1519 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1520 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1522 if (GET_MODE_PRECISION (mode)
1523 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1524 return SUBREG_REG (op);
1525 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1526 GET_MODE (SUBREG_REG (op)));
1529 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && POINTERS_EXTEND_UNSIGNED > 0
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1541 return convert_memory_address (Pmode, op);
1542 #endif
1543 break;
1545 default:
1546 break;
1549 return 0;
1552 /* Try to compute the value of a unary operation CODE whose output mode is to
1553 be MODE with input operand OP whose mode was originally OP_MODE.
1554 Return zero if the value cannot be computed. */
1556 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1557 rtx op, machine_mode op_mode)
1559 unsigned int width = GET_MODE_PRECISION (mode);
1561 if (code == VEC_DUPLICATE)
1563 gcc_assert (VECTOR_MODE_P (mode));
1564 if (GET_MODE (op) != VOIDmode)
1566 if (!VECTOR_MODE_P (GET_MODE (op)))
1567 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1568 else
1569 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1570 (GET_MODE (op)));
1572 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1573 || GET_CODE (op) == CONST_VECTOR)
1575 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1576 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1577 rtvec v = rtvec_alloc (n_elts);
1578 unsigned int i;
1580 if (GET_CODE (op) != CONST_VECTOR)
1581 for (i = 0; i < n_elts; i++)
1582 RTVEC_ELT (v, i) = op;
1583 else
1585 machine_mode inmode = GET_MODE (op);
1586 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1587 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1589 gcc_assert (in_n_elts < n_elts);
1590 gcc_assert ((n_elts % in_n_elts) == 0);
1591 for (i = 0; i < n_elts; i++)
1592 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1594 return gen_rtx_CONST_VECTOR (mode, v);
1598 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1600 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1601 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1602 machine_mode opmode = GET_MODE (op);
1603 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1604 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1608 gcc_assert (op_n_elts == n_elts);
1609 for (i = 0; i < n_elts; i++)
1611 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1612 CONST_VECTOR_ELT (op, i),
1613 GET_MODE_INNER (opmode));
1614 if (!x)
1615 return 0;
1616 RTVEC_ELT (v, i) = x;
1618 return gen_rtx_CONST_VECTOR (mode, v);
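/* For example, folding (neg:V4SI (const_vector [1 2 3 4])) applies NEG
   to each element and yields (const_vector [-1 -2 -3 -4]).  */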
1621 /* The order of these tests is critical so that, for example, we don't
1622 check the wrong mode (input vs. output) for a conversion operation,
1623 such as FIX. At some point, this should be simplified. */
1625 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1627 REAL_VALUE_TYPE d;
1629 if (op_mode == VOIDmode)
1631 /* CONST_INTs have VOIDmode as their mode. We assume that all
1632 the bits of the constant are significant, though this is
1633 a dangerous assumption, as CONST_INTs are often
1634 created and used with garbage in the bits outside of the
1635 precision of the implied mode of the const_int. */
1636 op_mode = MAX_MODE_INT;
1639 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1640 d = real_value_truncate (mode, d);
1641 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1643 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1645 REAL_VALUE_TYPE d;
1647 if (op_mode == VOIDmode)
1649 /* CONST_INTs have VOIDmode as their mode. We assume that all
1650 the bits of the constant are significant, though this is
1651 a dangerous assumption, as CONST_INTs are often
1652 created and used with garbage in the bits outside of the
1653 precision of the implied mode of the const_int. */
1654 op_mode = MAX_MODE_INT;
1657 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1658 d = real_value_truncate (mode, d);
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1662 if (CONST_SCALAR_INT_P (op) && width > 0)
1664 wide_int result;
1665 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1666 rtx_mode_t op0 = std::make_pair (op, imode);
1667 int int_value;
1669 #if TARGET_SUPPORTS_WIDE_INT == 0
1670 /* This assert keeps the simplification from producing a result
1671 that cannot be represented in a CONST_DOUBLE but a lot of
1672 upstream callers expect that this function never fails to
1673 simplify something, so if you added this check to the test
1674 above, the code would die later anyway. If this assert
1675 happens, you just need to make the port support wide int. */
1676 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1677 #endif
1679 switch (code)
1681 case NOT:
1682 result = wi::bit_not (op0);
1683 break;
1685 case NEG:
1686 result = wi::neg (op0);
1687 break;
1689 case ABS:
1690 result = wi::abs (op0);
1691 break;
1693 case FFS:
1694 result = wi::shwi (wi::ffs (op0), mode);
1695 break;
1697 case CLZ:
1698 if (wi::ne_p (op0, 0))
1699 int_value = wi::clz (op0);
1700 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1701 int_value = GET_MODE_PRECISION (mode);
1702 result = wi::shwi (int_value, mode);
1703 break;
1705 case CLRSB:
1706 result = wi::shwi (wi::clrsb (op0), mode);
1707 break;
1709 case CTZ:
1710 if (wi::ne_p (op0, 0))
1711 int_value = wi::ctz (op0);
1712 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1713 int_value = GET_MODE_PRECISION (mode);
1714 result = wi::shwi (int_value, mode);
1715 break;
1717 case POPCOUNT:
1718 result = wi::shwi (wi::popcount (op0), mode);
1719 break;
1721 case PARITY:
1722 result = wi::shwi (wi::parity (op0), mode);
1723 break;
1725 case BSWAP:
1726 result = wide_int (op0).bswap ();
1727 break;
1729 case TRUNCATE:
1730 case ZERO_EXTEND:
1731 result = wide_int::from (op0, width, UNSIGNED);
1732 break;
1734 case SIGN_EXTEND:
1735 result = wide_int::from (op0, width, SIGNED);
1736 break;
1738 case SQRT:
1739 default:
1740 return 0;
1743 return immed_wide_int_const (result, mode);
1746 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1747 && SCALAR_FLOAT_MODE_P (mode)
1748 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1750 REAL_VALUE_TYPE d;
1751 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1753 switch (code)
1755 case SQRT:
1756 return 0;
1757 case ABS:
1758 d = real_value_abs (&d);
1759 break;
1760 case NEG:
1761 d = real_value_negate (&d);
1762 break;
1763 case FLOAT_TRUNCATE:
1764 d = real_value_truncate (mode, d);
1765 break;
1766 case FLOAT_EXTEND:
1767 /* All this does is change the mode, unless changing
1768 mode class. */
1769 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1770 real_convert (&d, mode, &d);
1771 break;
1772 case FIX:
1773 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1774 break;
1775 case NOT:
1777 long tmp[4];
1778 int i;
1780 real_to_target (tmp, &d, GET_MODE (op));
1781 for (i = 0; i < 4; i++)
1782 tmp[i] = ~tmp[i];
1783 real_from_target (&d, tmp, mode);
1784 break;
1786 default:
1787 gcc_unreachable ();
1789 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1791 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1792 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1793 && GET_MODE_CLASS (mode) == MODE_INT
1794 && width > 0)
1796 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1797 operators are intentionally left unspecified (to ease implementation
1798 by target backends), for consistency, this routine implements the
1799 same semantics for constant folding as used by the middle-end. */
1801 /* This was formerly used only for non-IEEE float.
1802 eggert@twinsun.com says it is safe for IEEE also. */
1803 REAL_VALUE_TYPE x, t;
1804 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1805 wide_int wmax, wmin;
1806 /* This is part of the abi to real_to_integer, but we check
1807 things before making this call. */
1808 bool fail;
1810 switch (code)
1812 case FIX:
1813 if (REAL_VALUE_ISNAN (x))
1814 return const0_rtx;
1816 /* Test against the signed upper bound. */
1817 wmax = wi::max_value (width, SIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1822 /* Test against the signed lower bound. */
1823 wmin = wi::min_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1825 if (REAL_VALUES_LESS (x, t))
1826 return immed_wide_int_const (wmin, mode);
1828 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1829 break;
1831 case UNSIGNED_FIX:
1832 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1833 return const0_rtx;
1835 /* Test against the unsigned upper bound. */
1836 wmax = wi::max_value (width, UNSIGNED);
1837 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1838 if (REAL_VALUES_LESS (t, x))
1839 return immed_wide_int_const (wmax, mode);
1841 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1842 mode);
1843 break;
1845 default:
1846 gcc_unreachable ();
1850 return NULL_RTX;
1853 /* Subroutine of simplify_binary_operation to simplify a binary operation
1854 CODE that can commute with byte swapping, with result mode MODE and
1855 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1856 Return zero if no simplification or canonicalization is possible. */
1858 static rtx
1859 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1860 rtx op0, rtx op1)
1862 rtx tem;
1865 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1865 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1868 simplify_gen_unary (BSWAP, mode, op1, mode));
1869 return simplify_gen_unary (BSWAP, mode, tem, mode);
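/* For example, in SImode (and (bswap X) (const_int 0xff)) becomes
   (bswap (and X (const_int 0xff000000))): masking the low byte of a
   byte-swapped value is the same as byte-swapping the masked high byte.  */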
1872 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1873 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1875 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1876 return simplify_gen_unary (BSWAP, mode, tem, mode);
1879 return NULL_RTX;
1882 /* Subroutine of simplify_binary_operation to simplify a commutative,
1883 associative binary operation CODE with result mode MODE, operating
1884 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1885 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1886 canonicalization is possible. */
1888 static rtx
1889 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1890 rtx op0, rtx op1)
1892 rtx tem;
1894 /* Linearize the operator to the left. */
1895 if (GET_CODE (op1) == code)
1897 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
1898 if (GET_CODE (op0) == code)
1900 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1901 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1904 /* "a op (b op c)" becomes "(b op c) op a". */
1905 if (! swap_commutative_operands_p (op1, op0))
1906 return simplify_gen_binary (code, mode, op1, op0);
1908 tem = op0;
1909 op0 = op1;
1910 op1 = tem;
1913 if (GET_CODE (op0) == code)
1915 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1916 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1918 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1919 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1922 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
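/* For example, (mult (mult X (const_int 2)) (const_int 3)) simplifies here
   to (mult X (const_int 6)), because the two constants meet and fold.  */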
1923 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1924 if (tem != 0)
1925 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1927 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1928 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1929 if (tem != 0)
1930 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1933 return 0;
1937 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1938 and OP1. Return 0 if no simplification is possible.
1940 Don't use this for relational operations such as EQ or LT.
1941 Use simplify_relational_operation instead. */
1942 rtx
1943 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1946 rtx trueop0, trueop1;
1947 rtx tem;
1949 /* Relational operations don't work here. We must know the mode
1950 of the operands in order to do the comparison correctly.
1951 Assuming a full word can give incorrect results.
1952 Consider comparing 128 with -128 in QImode. */
1953 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1954 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1956 /* Make sure the constant is second. */
1957 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1958 && swap_commutative_operands_p (op0, op1))
1960 tem = op0, op0 = op1, op1 = tem;
1963 trueop0 = avoid_constant_pool_reference (op0);
1964 trueop1 = avoid_constant_pool_reference (op1);
1966 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1967 if (tem)
1968 return tem;
1969 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1972 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1973 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1974 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1975 actual constants. */
1977 static rtx
1978 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1979 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1981 rtx tem, reversed, opleft, opright;
1982 HOST_WIDE_INT val;
1983 unsigned int width = GET_MODE_PRECISION (mode);
1985 /* Even if we can't compute a constant result,
1986 there are some cases worth simplifying. */
1988 switch (code)
1990 case PLUS:
1991 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1992 when x is NaN, infinite, or finite and nonzero. They aren't
1993 when x is -0 and the rounding mode is not towards -infinity,
1994 since (-0) + 0 is then 0. */
1995 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1996 return op0;
1998 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1999 transformations are safe even for IEEE. */
2000 if (GET_CODE (op0) == NEG)
2001 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2002 else if (GET_CODE (op1) == NEG)
2003 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2005 /* (~a) + 1 -> -a */
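/* This is the two's-complement negation identity; e.g. for a == 5,
   ~5 is -6 and -6 + 1 is -5.  */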
2006 if (INTEGRAL_MODE_P (mode)
2007 && GET_CODE (op0) == NOT
2008 && trueop1 == const1_rtx)
2009 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2011 /* Handle both-operands-constant cases. We can only add
2012 CONST_INTs to constants since the sum of relocatable symbols
2013 can't be handled by most assemblers. Don't add CONST_INT
2014 to CONST_INT since overflow won't be computed properly if wider
2015 than HOST_BITS_PER_WIDE_INT. */
2017 if ((GET_CODE (op0) == CONST
2018 || GET_CODE (op0) == SYMBOL_REF
2019 || GET_CODE (op0) == LABEL_REF)
2020 && CONST_INT_P (op1))
2021 return plus_constant (mode, op0, INTVAL (op1));
2022 else if ((GET_CODE (op1) == CONST
2023 || GET_CODE (op1) == SYMBOL_REF
2024 || GET_CODE (op1) == LABEL_REF)
2025 && CONST_INT_P (op0))
2026 return plus_constant (mode, op1, INTVAL (op0));
2028 /* See if this is something like X * C + X or vice versa or
2029 if the multiplication is written as a shift.  If so, we can
2030 distribute the terms and combine them into a single multiply or
2031 shift.  But don't make something more expensive than
2032 we had before.  */
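/* For example, (plus (mult X (const_int 4)) X) becomes
   (mult X (const_int 5)), and (plus (ashift X (const_int 2)) X) is handled
   the same way since the shift is read as a multiplication by 4.  The
   result is kept only if it is no more expensive than the original.  */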
2034 if (SCALAR_INT_MODE_P (mode))
2036 rtx lhs = op0, rhs = op1;
2038 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2039 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2041 if (GET_CODE (lhs) == NEG)
2043 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2044 lhs = XEXP (lhs, 0);
2046 else if (GET_CODE (lhs) == MULT
2047 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2049 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2050 lhs = XEXP (lhs, 0);
2052 else if (GET_CODE (lhs) == ASHIFT
2053 && CONST_INT_P (XEXP (lhs, 1))
2054 && INTVAL (XEXP (lhs, 1)) >= 0
2055 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2057 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2058 GET_MODE_PRECISION (mode));
2059 lhs = XEXP (lhs, 0);
2062 if (GET_CODE (rhs) == NEG)
2064 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2065 rhs = XEXP (rhs, 0);
2067 else if (GET_CODE (rhs) == MULT
2068 && CONST_INT_P (XEXP (rhs, 1)))
2070 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2071 rhs = XEXP (rhs, 0);
2073 else if (GET_CODE (rhs) == ASHIFT
2074 && CONST_INT_P (XEXP (rhs, 1))
2075 && INTVAL (XEXP (rhs, 1)) >= 0
2076 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2078 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2079 GET_MODE_PRECISION (mode));
2080 rhs = XEXP (rhs, 0);
2083 if (rtx_equal_p (lhs, rhs))
2085 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2086 rtx coeff;
2087 bool speed = optimize_function_for_speed_p (cfun);
2089 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2091 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2092 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2093 ? tem : 0;
2097 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2098 if (CONST_SCALAR_INT_P (op1)
2099 && GET_CODE (op0) == XOR
2100 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2101 && mode_signbit_p (mode, op1))
2102 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2103 simplify_gen_binary (XOR, mode, op1,
2104 XEXP (op0, 1)));
2106 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2107 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2108 && GET_CODE (op0) == MULT
2109 && GET_CODE (XEXP (op0, 0)) == NEG)
2111 rtx in1, in2;
2113 in1 = XEXP (XEXP (op0, 0), 0);
2114 in2 = XEXP (op0, 1);
2115 return simplify_gen_binary (MINUS, mode, op1,
2116 simplify_gen_binary (MULT, mode,
2117 in1, in2));
2120 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2121 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2122 is 1. */
2123 if (COMPARISON_P (op0)
2124 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2125 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2126 && (reversed = reversed_comparison (op0, mode)))
2127 return
2128 simplify_gen_unary (NEG, mode, reversed, mode);
2130 /* If one of the operands is a PLUS or a MINUS, see if we can
2131 simplify this by the associative law.
2132 Don't use the associative law for floating point.
2133 The inaccuracy makes it nonassociative,
2134 and subtle programs can break if operations are associated. */
2136 if (INTEGRAL_MODE_P (mode)
2137 && (plus_minus_operand_p (op0)
2138 || plus_minus_operand_p (op1))
2139 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2140 return tem;
2142 /* Reassociate floating point addition only when the user
2143 specifies associative math operations. */
2144 if (FLOAT_MODE_P (mode)
2145 && flag_associative_math)
2147 tem = simplify_associative_operation (code, mode, op0, op1);
2148 if (tem)
2149 return tem;
2151 break;
2153 case COMPARE:
2154 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2155 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2156 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2157 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2159 rtx xop00 = XEXP (op0, 0);
2160 rtx xop10 = XEXP (op1, 0);
2162 #ifdef HAVE_cc0
2163 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2164 #else
2165 if (REG_P (xop00) && REG_P (xop10)
2166 && GET_MODE (xop00) == GET_MODE (xop10)
2167 && REGNO (xop00) == REGNO (xop10)
2168 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2169 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2170 #endif
2171 return xop00;
2173 break;
2175 case MINUS:
2176 /* We can't assume x-x is 0 even with non-IEEE floating point,
2177 but since it is zero except in very strange circumstances, we
2178 will treat it as zero with -ffinite-math-only. */
2179 if (rtx_equal_p (trueop0, trueop1)
2180 && ! side_effects_p (op0)
2181 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2182 return CONST0_RTX (mode);
2184 /* Change subtraction from zero into negation. (0 - x) is the
2185 same as -x when x is NaN, infinite, or finite and nonzero.
2186 But if the mode has signed zeros, and does not round towards
2187 -infinity, then 0 - 0 is 0, not -0. */
2188 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2189 return simplify_gen_unary (NEG, mode, op1, mode);
2191 /* (-1 - a) is ~a. */
2192 if (trueop0 == constm1_rtx)
2193 return simplify_gen_unary (NOT, mode, op1, mode);
2195 /* Subtracting 0 has no effect unless the mode has signed zeros
2196 and supports rounding towards -infinity. In such a case,
2197 0 - 0 is -0. */
2198 if (!(HONOR_SIGNED_ZEROS (mode)
2199 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2200 && trueop1 == CONST0_RTX (mode))
2201 return op0;
2203 /* See if this is something like X * C - X or vice versa or
2204 if the multiplication is written as a shift. If so, we can
2205 distribute and make a new multiply, shift, or maybe just
2206 have X (if C is 2 in the example above). But don't make
2207 something more expensive than we had before. */
2209 if (SCALAR_INT_MODE_P (mode))
2211 rtx lhs = op0, rhs = op1;
2213 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2214 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2216 if (GET_CODE (lhs) == NEG)
2218 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2219 lhs = XEXP (lhs, 0);
2221 else if (GET_CODE (lhs) == MULT
2222 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2224 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2225 lhs = XEXP (lhs, 0);
2227 else if (GET_CODE (lhs) == ASHIFT
2228 && CONST_INT_P (XEXP (lhs, 1))
2229 && INTVAL (XEXP (lhs, 1)) >= 0
2230 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2232 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2233 GET_MODE_PRECISION (mode));
2234 lhs = XEXP (lhs, 0);
2237 if (GET_CODE (rhs) == NEG)
2239 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2240 rhs = XEXP (rhs, 0);
2242 else if (GET_CODE (rhs) == MULT
2243 && CONST_INT_P (XEXP (rhs, 1)))
2245 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2246 rhs = XEXP (rhs, 0);
2248 else if (GET_CODE (rhs) == ASHIFT
2249 && CONST_INT_P (XEXP (rhs, 1))
2250 && INTVAL (XEXP (rhs, 1)) >= 0
2251 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2253 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2254 GET_MODE_PRECISION (mode));
2255 negcoeff1 = -negcoeff1;
2256 rhs = XEXP (rhs, 0);
2259 if (rtx_equal_p (lhs, rhs))
2261 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2262 rtx coeff;
2263 bool speed = optimize_function_for_speed_p (cfun);
2265 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2267 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2268 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2269 ? tem : 0;
2273 /* (a - (-b)) -> (a + b). True even for IEEE. */
2274 if (GET_CODE (op1) == NEG)
2275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2277 /* (-x - c) may be simplified as (-c - x). */
2278 if (GET_CODE (op0) == NEG
2279 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2281 tem = simplify_unary_operation (NEG, mode, op1, mode);
2282 if (tem)
2283 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2286 /* Don't let a relocatable value get a negative coeff. */
2287 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2288 return simplify_gen_binary (PLUS, mode,
2289 op0,
2290 neg_const_int (mode, op1));
2292 /* (x - (x & y)) -> (x & ~y) */
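/* For example, with X == 12 (0b1100) and Y == 10 (0b1010):
   X & Y == 8, X - 8 == 4, and X & ~Y == 4.  */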
2293 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2295 if (rtx_equal_p (op0, XEXP (op1, 0)))
2297 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2298 GET_MODE (XEXP (op1, 1)));
2299 return simplify_gen_binary (AND, mode, op0, tem);
2301 if (rtx_equal_p (op0, XEXP (op1, 1)))
2303 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2304 GET_MODE (XEXP (op1, 0)));
2305 return simplify_gen_binary (AND, mode, op0, tem);
2309 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2310 by reversing the comparison code if valid. */
2311 if (STORE_FLAG_VALUE == 1
2312 && trueop0 == const1_rtx
2313 && COMPARISON_P (op1)
2314 && (reversed = reversed_comparison (op1, mode)))
2315 return reversed;
2317 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2318 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2319 && GET_CODE (op1) == MULT
2320 && GET_CODE (XEXP (op1, 0)) == NEG)
2322 rtx in1, in2;
2324 in1 = XEXP (XEXP (op1, 0), 0);
2325 in2 = XEXP (op1, 1);
2326 return simplify_gen_binary (PLUS, mode,
2327 simplify_gen_binary (MULT, mode,
2328 in1, in2),
2329 op0);
2332 /* Canonicalize (minus (neg A) (mult B C)) to
2333 (minus (mult (neg B) C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2335 && GET_CODE (op1) == MULT
2336 && GET_CODE (op0) == NEG)
2338 rtx in1, in2;
2340 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2341 in2 = XEXP (op1, 1);
2342 return simplify_gen_binary (MINUS, mode,
2343 simplify_gen_binary (MULT, mode,
2344 in1, in2),
2345 XEXP (op0, 0));
2348 /* If one of the operands is a PLUS or a MINUS, see if we can
2349 simplify this by the associative law. This will, for example,
2350 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2351 Don't use the associative law for floating point.
2352 The inaccuracy makes it nonassociative,
2353 and subtle programs can break if operations are associated. */
2355 if (INTEGRAL_MODE_P (mode)
2356 && (plus_minus_operand_p (op0)
2357 || plus_minus_operand_p (op1))
2358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2359 return tem;
2360 break;
2362 case MULT:
2363 if (trueop1 == constm1_rtx)
2364 return simplify_gen_unary (NEG, mode, op0, mode);
2366 if (GET_CODE (op0) == NEG)
2368 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2369 /* If op1 is a MULT as well and simplify_unary_operation
2370 just moved the NEG to the second operand, simplify_gen_binary
2371 below could through simplify_associative_operation move
2372 the NEG around again and recurse endlessly. */
2373 if (temp
2374 && GET_CODE (op1) == MULT
2375 && GET_CODE (temp) == MULT
2376 && XEXP (op1, 0) == XEXP (temp, 0)
2377 && GET_CODE (XEXP (temp, 1)) == NEG
2378 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2379 temp = NULL_RTX;
2380 if (temp)
2381 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2383 if (GET_CODE (op1) == NEG)
2385 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2386 /* If op0 is a MULT as well and simplify_unary_operation
2387 just moved the NEG to the second operand, simplify_gen_binary
2388 below could through simplify_associative_operation move
2389 the NEG around again and recurse endlessly. */
2390 if (temp
2391 && GET_CODE (op0) == MULT
2392 && GET_CODE (temp) == MULT
2393 && XEXP (op0, 0) == XEXP (temp, 0)
2394 && GET_CODE (XEXP (temp, 1)) == NEG
2395 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2396 temp = NULL_RTX;
2397 if (temp)
2398 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2401 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2402 x is NaN, since x * 0 is then also NaN. Nor is it valid
2403 when the mode has signed zeros, since multiplying a negative
2404 number by 0 will give -0, not 0. */
2405 if (!HONOR_NANS (mode)
2406 && !HONOR_SIGNED_ZEROS (mode)
2407 && trueop1 == CONST0_RTX (mode)
2408 && ! side_effects_p (op0))
2409 return op1;
2411 /* In IEEE floating point, x*1 is not equivalent to x for
2412 signalling NaNs. */
2413 if (!HONOR_SNANS (mode)
2414 && trueop1 == CONST1_RTX (mode))
2415 return op0;
2417 /* Convert multiply by constant power of two into shift. */
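/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */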
2418 if (CONST_SCALAR_INT_P (trueop1))
2420 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2421 if (val >= 0)
2422 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2425 /* x*2 is x+x and x*(-1) is -x */
2426 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2428 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2429 && GET_MODE (op0) == mode)
2431 REAL_VALUE_TYPE d;
2432 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2434 if (REAL_VALUES_EQUAL (d, dconst2))
2435 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2437 if (!HONOR_SNANS (mode)
2438 && REAL_VALUES_EQUAL (d, dconstm1))
2439 return simplify_gen_unary (NEG, mode, op0, mode);
2442 /* Optimize -x * -x as x * x. */
2443 if (FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == NEG
2445 && GET_CODE (op1) == NEG
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2450 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2451 if (SCALAR_FLOAT_MODE_P (mode)
2452 && GET_CODE (op0) == ABS
2453 && GET_CODE (op1) == ABS
2454 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2455 && !side_effects_p (XEXP (op0, 0)))
2456 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2458 /* Reassociate multiplication, but for floating point MULTs
2459 only when the user specifies unsafe math optimizations. */
2460 if (! FLOAT_MODE_P (mode)
2461 || flag_unsafe_math_optimizations)
2463 tem = simplify_associative_operation (code, mode, op0, op1);
2464 if (tem)
2465 return tem;
2467 break;
2469 case IOR:
2470 if (trueop1 == CONST0_RTX (mode))
2471 return op0;
2472 if (INTEGRAL_MODE_P (mode)
2473 && trueop1 == CONSTM1_RTX (mode)
2474 && !side_effects_p (op0))
2475 return op1;
2476 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2477 return op0;
2478 /* A | (~A) -> -1 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && SCALAR_INT_MODE_P (mode))
2483 return constm1_rtx;
2485 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2486 if (CONST_INT_P (op1)
2487 && HWI_COMPUTABLE_MODE_P (mode)
2488 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2489 && !side_effects_p (op0))
2490 return op1;
2492 /* Canonicalize (X & C1) | C2. */
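/* For example, with C1 == 0x0f and C2 == 0x0c, the last rule below narrows
   C1 to 0x03, giving (ior (and X (const_int 3)) (const_int 12)).  */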
2493 if (GET_CODE (op0) == AND
2494 && CONST_INT_P (trueop1)
2495 && CONST_INT_P (XEXP (op0, 1)))
2497 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2498 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2499 HOST_WIDE_INT c2 = INTVAL (trueop1);
2501 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2502 if ((c1 & c2) == c1
2503 && !side_effects_p (XEXP (op0, 0)))
2504 return trueop1;
2506 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2507 if (((c1|c2) & mask) == mask)
2508 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2510 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2511 if (((c1 & ~c2) & mask) != (c1 & mask))
2513 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2514 gen_int_mode (c1 & ~c2, mode));
2515 return simplify_gen_binary (IOR, mode, tem, op1);
2519 /* Convert (A & B) | A to A. */
2520 if (GET_CODE (op0) == AND
2521 && (rtx_equal_p (XEXP (op0, 0), op1)
2522 || rtx_equal_p (XEXP (op0, 1), op1))
2523 && ! side_effects_p (XEXP (op0, 0))
2524 && ! side_effects_p (XEXP (op0, 1)))
2525 return op1;
2527 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2528 mode size to (rotate A CX). */
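/* For example, in SImode
   (ior (ashift A (const_int 8)) (lshiftrt A (const_int 24))) becomes
   (rotate A (const_int 8)), since 8 + 24 equals the mode precision.  */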
2530 if (GET_CODE (op1) == ASHIFT
2531 || GET_CODE (op1) == SUBREG)
2533 opleft = op1;
2534 opright = op0;
2536 else
2538 opright = op1;
2539 opleft = op0;
2542 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2543 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2544 && CONST_INT_P (XEXP (opleft, 1))
2545 && CONST_INT_P (XEXP (opright, 1))
2546 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2547 == GET_MODE_PRECISION (mode)))
2548 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2550 /* Same, but for ashift that has been "simplified" to a wider mode
2551 by simplify_shift_const. */
2553 if (GET_CODE (opleft) == SUBREG
2554 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2555 && GET_CODE (opright) == LSHIFTRT
2556 && GET_CODE (XEXP (opright, 0)) == SUBREG
2557 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2558 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2559 && (GET_MODE_SIZE (GET_MODE (opleft))
2560 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2561 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2562 SUBREG_REG (XEXP (opright, 0)))
2563 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2564 && CONST_INT_P (XEXP (opright, 1))
2565 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2566 == GET_MODE_PRECISION (mode)))
2567 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2568 XEXP (SUBREG_REG (opleft), 1));
2570 /* If we have (ior (and X C1) C2), simplify this by making
2571 C1 as small as possible if C1 actually changes. */
2572 if (CONST_INT_P (op1)
2573 && (HWI_COMPUTABLE_MODE_P (mode)
2574 || INTVAL (op1) > 0)
2575 && GET_CODE (op0) == AND
2576 && CONST_INT_P (XEXP (op0, 1))
2577 && CONST_INT_P (op1)
2578 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2580 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2581 gen_int_mode (UINTVAL (XEXP (op0, 1))
2582 & ~UINTVAL (op1),
2583 mode));
2584 return simplify_gen_binary (IOR, mode, tmp, op1);
2587 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2588 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2589 the PLUS does not affect any of the bits in OP1: then we can do
2590 the IOR as a PLUS and we can associate. This is valid if OP1
2591 can be safely shifted left C bits. */
2592 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2593 && GET_CODE (XEXP (op0, 0)) == PLUS
2594 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2595 && CONST_INT_P (XEXP (op0, 1))
2596 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2598 int count = INTVAL (XEXP (op0, 1));
2599 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2601 if (mask >> count == INTVAL (trueop1)
2602 && trunc_int_for_mode (mask, mode) == mask
2603 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2604 return simplify_gen_binary (ASHIFTRT, mode,
2605 plus_constant (mode, XEXP (op0, 0),
2606 mask),
2607 XEXP (op0, 1));
2610 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2611 if (tem)
2612 return tem;
2614 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617 break;
2619 case XOR:
2620 if (trueop1 == CONST0_RTX (mode))
2621 return op0;
2622 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2623 return simplify_gen_unary (NOT, mode, op0, mode);
2624 if (rtx_equal_p (trueop0, trueop1)
2625 && ! side_effects_p (op0)
2626 && GET_MODE_CLASS (mode) != MODE_CC)
2627 return CONST0_RTX (mode);
2629 /* Canonicalize XOR of the most significant bit to PLUS. */
2630 if (CONST_SCALAR_INT_P (op1)
2631 && mode_signbit_p (mode, op1))
2632 return simplify_gen_binary (PLUS, mode, op0, op1);
2633 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2634 if (CONST_SCALAR_INT_P (op1)
2635 && GET_CODE (op0) == PLUS
2636 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2637 && mode_signbit_p (mode, XEXP (op0, 1)))
2638 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2639 simplify_gen_binary (XOR, mode, op1,
2640 XEXP (op0, 1)));
2642 /* If we are XORing two things that have no bits in common,
2643 convert them into an IOR. This helps to detect rotation encoded
2644 using those methods and possibly other simplifications. */
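/* For example, (xor (and X (const_int 0xff00)) (and Y (const_int 0x00ff)))
   becomes the corresponding IOR, because the two operands can have no
   nonzero bits in common.  */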
2646 if (HWI_COMPUTABLE_MODE_P (mode)
2647 && (nonzero_bits (op0, mode)
2648 & nonzero_bits (op1, mode)) == 0)
2649 return (simplify_gen_binary (IOR, mode, op0, op1));
2651 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2652 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2653 (NOT y). */
2655 int num_negated = 0;
2657 if (GET_CODE (op0) == NOT)
2658 num_negated++, op0 = XEXP (op0, 0);
2659 if (GET_CODE (op1) == NOT)
2660 num_negated++, op1 = XEXP (op1, 0);
2662 if (num_negated == 2)
2663 return simplify_gen_binary (XOR, mode, op0, op1);
2664 else if (num_negated == 1)
2665 return simplify_gen_unary (NOT, mode,
2666 simplify_gen_binary (XOR, mode, op0, op1),
2667 mode);
2670 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2671 correspond to a machine insn or result in further simplifications
2672 if B is a constant. */
2674 if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 1), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 0), mode),
2680 op1);
2682 else if (GET_CODE (op0) == AND
2683 && rtx_equal_p (XEXP (op0, 0), op1)
2684 && ! side_effects_p (op1))
2685 return simplify_gen_binary (AND, mode,
2686 simplify_gen_unary (NOT, mode,
2687 XEXP (op0, 1), mode),
2688 op1);
2690 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2691 we can transform like this:
2692 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2693 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2694 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2695 Attempt a few simplifications when B and C are both constants. */
2696 if (GET_CODE (op0) == AND
2697 && CONST_INT_P (op1)
2698 && CONST_INT_P (XEXP (op0, 1)))
2700 rtx a = XEXP (op0, 0);
2701 rtx b = XEXP (op0, 1);
2702 rtx c = op1;
2703 HOST_WIDE_INT bval = INTVAL (b);
2704 HOST_WIDE_INT cval = INTVAL (c);
2706 rtx na_c
2707 = simplify_binary_operation (AND, mode,
2708 simplify_gen_unary (NOT, mode, a, mode),
2709 c);
2710 if ((~cval & bval) == 0)
2712 /* Try to simplify ~A&C | ~B&C. */
2713 if (na_c != NULL_RTX)
2714 return simplify_gen_binary (IOR, mode, na_c,
2715 gen_int_mode (~bval & cval, mode));
2717 else
2719 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2720 if (na_c == const0_rtx)
2722 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2723 gen_int_mode (~cval & bval,
2724 mode));
2725 return simplify_gen_binary (IOR, mode, a_nc_b,
2726 gen_int_mode (~bval & cval,
2727 mode));
2732 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2733 comparison if STORE_FLAG_VALUE is 1. */
2734 if (STORE_FLAG_VALUE == 1
2735 && trueop1 == const1_rtx
2736 && COMPARISON_P (op0)
2737 && (reversed = reversed_comparison (op0, mode)))
2738 return reversed;
2740 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2741 is (lt foo (const_int 0)), so we can perform the above
2742 simplification if STORE_FLAG_VALUE is 1. */
2744 if (STORE_FLAG_VALUE == 1
2745 && trueop1 == const1_rtx
2746 && GET_CODE (op0) == LSHIFTRT
2747 && CONST_INT_P (XEXP (op0, 1))
2748 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2749 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2751 /* (xor (comparison foo bar) (const_int sign-bit))
2752 when STORE_FLAG_VALUE is the sign bit. */
2753 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2754 && trueop1 == const_true_rtx
2755 && COMPARISON_P (op0)
2756 && (reversed = reversed_comparison (op0, mode)))
2757 return reversed;
2759 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2760 if (tem)
2761 return tem;
2763 tem = simplify_associative_operation (code, mode, op0, op1);
2764 if (tem)
2765 return tem;
2766 break;
2768 case AND:
2769 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2770 return trueop1;
2771 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2772 return op0;
2773 if (HWI_COMPUTABLE_MODE_P (mode))
2775 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2776 HOST_WIDE_INT nzop1;
2777 if (CONST_INT_P (trueop1))
2779 HOST_WIDE_INT val1 = INTVAL (trueop1);
2780 /* If we are turning off bits already known off in OP0, we need
2781 not do an AND. */
2782 if ((nzop0 & ~val1) == 0)
2783 return op0;
2785 nzop1 = nonzero_bits (trueop1, mode);
2786 /* If we are clearing all the nonzero bits, the result is zero. */
2787 if ((nzop1 & nzop0) == 0
2788 && !side_effects_p (op0) && !side_effects_p (op1))
2789 return CONST0_RTX (mode);
2791 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2792 && GET_MODE_CLASS (mode) != MODE_CC)
2793 return op0;
2794 /* A & (~A) -> 0 */
2795 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2796 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2797 && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return CONST0_RTX (mode);
2801 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2802 there are no nonzero bits of C outside of X's mode. */
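/* For example, (and (sign_extend:SI (reg:HI X)) (const_int 0x7fff)) becomes
   (zero_extend:SI (and:HI (reg:HI X) (const_int 0x7fff))), since 0x7fff has
   no bits set outside the HImode mask.  */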
2803 if ((GET_CODE (op0) == SIGN_EXTEND
2804 || GET_CODE (op0) == ZERO_EXTEND)
2805 && CONST_INT_P (trueop1)
2806 && HWI_COMPUTABLE_MODE_P (mode)
2807 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2808 & UINTVAL (trueop1)) == 0)
2810 machine_mode imode = GET_MODE (XEXP (op0, 0));
2811 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2812 gen_int_mode (INTVAL (trueop1),
2813 imode));
2814 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2817 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2818 we might be able to further simplify the AND with X and potentially
2819 remove the truncation altogether. */
2820 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2822 rtx x = XEXP (op0, 0);
2823 machine_mode xmode = GET_MODE (x);
2824 tem = simplify_gen_binary (AND, xmode, x,
2825 gen_int_mode (INTVAL (trueop1), xmode));
2826 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2829 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2830 if (GET_CODE (op0) == IOR
2831 && CONST_INT_P (trueop1)
2832 && CONST_INT_P (XEXP (op0, 1)))
2834 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2835 return simplify_gen_binary (IOR, mode,
2836 simplify_gen_binary (AND, mode,
2837 XEXP (op0, 0), op1),
2838 gen_int_mode (tmp, mode));
2841 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2842 insn (and may simplify more). */
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 0), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 1), mode),
2849 op1);
2851 if (GET_CODE (op0) == XOR
2852 && rtx_equal_p (XEXP (op0, 1), op1)
2853 && ! side_effects_p (op1))
2854 return simplify_gen_binary (AND, mode,
2855 simplify_gen_unary (NOT, mode,
2856 XEXP (op0, 0), mode),
2857 op1);
2859 /* Similarly for (~(A ^ B)) & A. */
2860 if (GET_CODE (op0) == NOT
2861 && GET_CODE (XEXP (op0, 0)) == XOR
2862 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2863 && ! side_effects_p (op1))
2864 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2866 if (GET_CODE (op0) == NOT
2867 && GET_CODE (XEXP (op0, 0)) == XOR
2868 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2869 && ! side_effects_p (op1))
2870 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2872 /* Convert (A | B) & A to A. */
2873 if (GET_CODE (op0) == IOR
2874 && (rtx_equal_p (XEXP (op0, 0), op1)
2875 || rtx_equal_p (XEXP (op0, 1), op1))
2876 && ! side_effects_p (XEXP (op0, 0))
2877 && ! side_effects_p (XEXP (op0, 1)))
2878 return op1;
2880 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2881 ((A & N) + B) & M -> (A + B) & M
2882 Similarly if (N & M) == 0,
2883 ((A | N) + B) & M -> (A + B) & M
2884 and for - instead of + and/or ^ instead of |.
2885 Also, if (N & M) == 0, then
2886 (A +- N) & M -> A & M. */
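/* For example, with M == 0xff and N == 0xffff,
   ((A & 0xffff) + B) & 0xff becomes (A + B) & 0xff: the low 8 bits of the
   sum depend only on the low 8 bits of A and B, which the inner mask
   leaves unchanged.  */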
2887 if (CONST_INT_P (trueop1)
2888 && HWI_COMPUTABLE_MODE_P (mode)
2889 && ~UINTVAL (trueop1)
2890 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2891 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2893 rtx pmop[2];
2894 int which;
2896 pmop[0] = XEXP (op0, 0);
2897 pmop[1] = XEXP (op0, 1);
2899 if (CONST_INT_P (pmop[1])
2900 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2901 return simplify_gen_binary (AND, mode, pmop[0], op1);
2903 for (which = 0; which < 2; which++)
2905 tem = pmop[which];
2906 switch (GET_CODE (tem))
2908 case AND:
2909 if (CONST_INT_P (XEXP (tem, 1))
2910 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2911 == UINTVAL (trueop1))
2912 pmop[which] = XEXP (tem, 0);
2913 break;
2914 case IOR:
2915 case XOR:
2916 if (CONST_INT_P (XEXP (tem, 1))
2917 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2918 pmop[which] = XEXP (tem, 0);
2919 break;
2920 default:
2921 break;
2925 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2927 tem = simplify_gen_binary (GET_CODE (op0), mode,
2928 pmop[0], pmop[1]);
2929 return simplify_gen_binary (code, mode, tem, op1);
2933 /* (and X (ior (not X) Y)) -> (and X Y) */
2934 if (GET_CODE (op1) == IOR
2935 && GET_CODE (XEXP (op1, 0)) == NOT
2936 && op0 == XEXP (XEXP (op1, 0), 0))
2937 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2939 /* (and (ior (not X) Y) X) -> (and X Y) */
2940 if (GET_CODE (op0) == IOR
2941 && GET_CODE (XEXP (op0, 0)) == NOT
2942 && op1 == XEXP (XEXP (op0, 0), 0))
2943 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2945 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2946 if (tem)
2947 return tem;
2949 tem = simplify_associative_operation (code, mode, op0, op1);
2950 if (tem)
2951 return tem;
2952 break;
2954 case UDIV:
2955 /* 0/x is 0 (or x&0 if x has side-effects). */
2956 if (trueop0 == CONST0_RTX (mode))
2958 if (side_effects_p (op1))
2959 return simplify_gen_binary (AND, mode, op1, trueop0);
2960 return trueop0;
2962 /* x/1 is x. */
2963 if (trueop1 == CONST1_RTX (mode))
2965 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2966 if (tem)
2967 return tem;
2969 /* Convert divide by power of two into shift. */
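/* For example, (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */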
2970 if (CONST_INT_P (trueop1)
2971 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2972 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2973 break;
2975 case DIV:
2976 /* Handle floating point and integers separately. */
2977 if (SCALAR_FLOAT_MODE_P (mode))
2979 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2980 safe for modes with NaNs, since 0.0 / 0.0 will then be
2981 NaN rather than 0.0. Nor is it safe for modes with signed
2982 zeros, since dividing 0 by a negative number gives -0.0 */
2983 if (trueop0 == CONST0_RTX (mode)
2984 && !HONOR_NANS (mode)
2985 && !HONOR_SIGNED_ZEROS (mode)
2986 && ! side_effects_p (op1))
2987 return op0;
2988 /* x/1.0 is x. */
2989 if (trueop1 == CONST1_RTX (mode)
2990 && !HONOR_SNANS (mode))
2991 return op0;
2993 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2994 && trueop1 != CONST0_RTX (mode))
2996 REAL_VALUE_TYPE d;
2997 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2999 /* x/-1.0 is -x. */
3000 if (REAL_VALUES_EQUAL (d, dconstm1)
3001 && !HONOR_SNANS (mode))
3002 return simplify_gen_unary (NEG, mode, op0, mode);
3004 /* Change FP division by a constant into multiplication.
3005 Only do this with -freciprocal-math. */
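/* For example, with -freciprocal-math, X / 4.0 becomes X * 0.25.  */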
3006 if (flag_reciprocal_math
3007 && !REAL_VALUES_EQUAL (d, dconst0))
3009 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3010 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3011 return simplify_gen_binary (MULT, mode, op0, tem);
3015 else if (SCALAR_INT_MODE_P (mode))
3017 /* 0/x is 0 (or x&0 if x has side-effects). */
3018 if (trueop0 == CONST0_RTX (mode)
3019 && !cfun->can_throw_non_call_exceptions)
3021 if (side_effects_p (op1))
3022 return simplify_gen_binary (AND, mode, op1, trueop0);
3023 return trueop0;
3025 /* x/1 is x. */
3026 if (trueop1 == CONST1_RTX (mode))
3028 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3029 if (tem)
3030 return tem;
3032 /* x/-1 is -x. */
3033 if (trueop1 == constm1_rtx)
3035 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3036 if (x)
3037 return simplify_gen_unary (NEG, mode, x, mode);
3040 break;
3042 case UMOD:
3043 /* 0%x is 0 (or x&0 if x has side-effects). */
3044 if (trueop0 == CONST0_RTX (mode))
3046 if (side_effects_p (op1))
3047 return simplify_gen_binary (AND, mode, op1, trueop0);
3048 return trueop0;
3050 /* x%1 is 0 (or x&0 if x has side-effects). */
3051 if (trueop1 == CONST1_RTX (mode))
3053 if (side_effects_p (op0))
3054 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3055 return CONST0_RTX (mode);
3057 /* Implement modulus by power of two as AND. */
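/* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */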
3058 if (CONST_INT_P (trueop1)
3059 && exact_log2 (UINTVAL (trueop1)) > 0)
3060 return simplify_gen_binary (AND, mode, op0,
3061 gen_int_mode (INTVAL (op1) - 1, mode));
3062 break;
3064 case MOD:
3065 /* 0%x is 0 (or x&0 if x has side-effects). */
3066 if (trueop0 == CONST0_RTX (mode))
3068 if (side_effects_p (op1))
3069 return simplify_gen_binary (AND, mode, op1, trueop0);
3070 return trueop0;
3072 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3073 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3075 if (side_effects_p (op0))
3076 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3077 return CONST0_RTX (mode);
3079 break;
3081 case ROTATERT:
3082 case ROTATE:
3083 /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3084 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3085 bitsize - 1, use the other direction of rotate, with an amount of
3086 1 .. bitsize / 2 - 1 instead.  */
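/* For example, in SImode (rotate X (const_int 24)) is canonicalized to
   (rotatert X (const_int 8)) when both rotate patterns exist; a rotate by
   exactly half the precision keeps the left-rotate form.  */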
3087 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3088 if (CONST_INT_P (trueop1)
3089 && IN_RANGE (INTVAL (trueop1),
3090 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3091 GET_MODE_PRECISION (mode) - 1))
3092 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3093 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3094 - INTVAL (trueop1)));
3095 #endif
3096 /* FALLTHRU */
3097 case ASHIFTRT:
3098 if (trueop1 == CONST0_RTX (mode))
3099 return op0;
3100 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3101 return op0;
3102 /* Rotating ~0 always results in ~0. */
3103 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3104 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3105 && ! side_effects_p (op1))
3106 return op0;
3107 /* Given:
3108 scalar modes M1, M2
3109 scalar constants c1, c2
3110 size (M2) > size (M1)
3111 c1 == size (M2) - size (M1)
3112 optimize:
3113 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2)
3114 (const_int <c1>))
3115 <low_part>)
3116 (const_int <c2>))
3118 (subreg:M1 (ashiftrt:M2 (reg:M2)
3119 (const_int <c1 + c2>))
3120 <low_part>). */
3121 if (!VECTOR_MODE_P (mode)
3122 && SUBREG_P (op0)
3123 && CONST_INT_P (op1)
3124 && (GET_CODE (SUBREG_REG (op0)) == LSHIFTRT)
3125 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3126 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3127 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3128 > GET_MODE_BITSIZE (mode))
3129 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3130 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3131 - GET_MODE_BITSIZE (mode)))
3132 && subreg_lowpart_p (op0))
3134 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3135 + INTVAL (op1));
3136 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3137 tmp = simplify_gen_binary (ASHIFTRT,
3138 GET_MODE (SUBREG_REG (op0)),
3139 XEXP (SUBREG_REG (op0), 0),
3140 tmp);
3141 return simplify_gen_subreg (mode, tmp, inner_mode,
3142 subreg_lowpart_offset (mode,
3143 inner_mode));
3145 canonicalize_shift:
3146 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3148 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3149 if (val != INTVAL (op1))
3150 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3152 break;
3154 case ASHIFT:
3155 case SS_ASHIFT:
3156 case US_ASHIFT:
3157 if (trueop1 == CONST0_RTX (mode))
3158 return op0;
3159 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3160 return op0;
3161 goto canonicalize_shift;
3163 case LSHIFTRT:
3164 if (trueop1 == CONST0_RTX (mode))
3165 return op0;
3166 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3167 return op0;
3168 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
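/* For example, if CLZ of zero is defined as 32 in SImode, then
   (lshiftrt (clz:SI X) (const_int 5)) is 1 exactly when X is zero, so it
   folds to (eq X (const_int 0)).  */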
3169 if (GET_CODE (op0) == CLZ
3170 && CONST_INT_P (trueop1)
3171 && STORE_FLAG_VALUE == 1
3172 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3174 machine_mode imode = GET_MODE (XEXP (op0, 0));
3175 unsigned HOST_WIDE_INT zero_val = 0;
3177 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3178 && zero_val == GET_MODE_PRECISION (imode)
3179 && INTVAL (trueop1) == exact_log2 (zero_val))
3180 return simplify_gen_relational (EQ, mode, imode,
3181 XEXP (op0, 0), const0_rtx);
3183 goto canonicalize_shift;
3185 case SMIN:
3186 if (width <= HOST_BITS_PER_WIDE_INT
3187 && mode_signbit_p (mode, trueop1)
3188 && ! side_effects_p (op0))
3189 return op1;
3190 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3191 return op0;
3192 tem = simplify_associative_operation (code, mode, op0, op1);
3193 if (tem)
3194 return tem;
3195 break;
3197 case SMAX:
3198 if (width <= HOST_BITS_PER_WIDE_INT
3199 && CONST_INT_P (trueop1)
3200 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3201 && ! side_effects_p (op0))
3202 return op1;
3203 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3204 return op0;
3205 tem = simplify_associative_operation (code, mode, op0, op1);
3206 if (tem)
3207 return tem;
3208 break;
3210 case UMIN:
3211 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3212 return op1;
3213 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3214 return op0;
3215 tem = simplify_associative_operation (code, mode, op0, op1);
3216 if (tem)
3217 return tem;
3218 break;
3220 case UMAX:
3221 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3222 return op1;
3223 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3224 return op0;
3225 tem = simplify_associative_operation (code, mode, op0, op1);
3226 if (tem)
3227 return tem;
3228 break;
3230 case SS_PLUS:
3231 case US_PLUS:
3232 case SS_MINUS:
3233 case US_MINUS:
3234 case SS_MULT:
3235 case US_MULT:
3236 case SS_DIV:
3237 case US_DIV:
3238 /* ??? There are simplifications that can be done. */
3239 return 0;
3241 case VEC_SELECT:
3242 if (!VECTOR_MODE_P (mode))
3244 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3245 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3246 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3247 gcc_assert (XVECLEN (trueop1, 0) == 1);
3248 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3250 if (GET_CODE (trueop0) == CONST_VECTOR)
3251 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3252 (trueop1, 0, 0)));
3254 /* Extract a scalar element from a nested VEC_SELECT expression
3255 (with optional nested VEC_CONCAT expression). Some targets
3256 (i386) extract a scalar element from a vector using a chain of
3257 nested VEC_SELECT expressions. When the input operand is a memory
3258 operand, this operation can be simplified to a simple scalar
3259 load from an offset memory address. */
3260 if (GET_CODE (trueop0) == VEC_SELECT)
3262 rtx op0 = XEXP (trueop0, 0);
3263 rtx op1 = XEXP (trueop0, 1);
3265 machine_mode opmode = GET_MODE (op0);
3266 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3267 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3269 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3270 int elem;
3272 rtvec vec;
3273 rtx tmp_op, tmp;
3275 gcc_assert (GET_CODE (op1) == PARALLEL);
3276 gcc_assert (i < n_elts);
3279 /* Select the element pointed to by the nested selector.  */
3279 elem = INTVAL (XVECEXP (op1, 0, i));
3281 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3282 if (GET_CODE (op0) == VEC_CONCAT)
3284 rtx op00 = XEXP (op0, 0);
3285 rtx op01 = XEXP (op0, 1);
3287 machine_mode mode00, mode01;
3288 int n_elts00, n_elts01;
3290 mode00 = GET_MODE (op00);
3291 mode01 = GET_MODE (op01);
3293 /* Find out number of elements of each operand. */
3294 if (VECTOR_MODE_P (mode00))
3296 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3297 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3299 else
3300 n_elts00 = 1;
3302 if (VECTOR_MODE_P (mode01))
3304 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3305 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3307 else
3308 n_elts01 = 1;
3310 gcc_assert (n_elts == n_elts00 + n_elts01);
3312 /* Select correct operand of VEC_CONCAT
3313 and adjust selector. */
3314 if (elem < n_elts00)
3315 tmp_op = op00;
3316 else
3318 tmp_op = op01;
3319 elem -= n_elts00;
3322 else
3323 tmp_op = op0;
3325 vec = rtvec_alloc (1);
3326 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3328 tmp = gen_rtx_fmt_ee (code, mode,
3329 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3330 return tmp;
3332 if (GET_CODE (trueop0) == VEC_DUPLICATE
3333 && GET_MODE (XEXP (trueop0, 0)) == mode)
3334 return XEXP (trueop0, 0);
3336 else
3338 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3339 gcc_assert (GET_MODE_INNER (mode)
3340 == GET_MODE_INNER (GET_MODE (trueop0)));
3341 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3343 if (GET_CODE (trueop0) == CONST_VECTOR)
3345 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3346 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3347 rtvec v = rtvec_alloc (n_elts);
3348 unsigned int i;
3350 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3351 for (i = 0; i < n_elts; i++)
3353 rtx x = XVECEXP (trueop1, 0, i);
3355 gcc_assert (CONST_INT_P (x));
3356 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3357 INTVAL (x));
3360 return gen_rtx_CONST_VECTOR (mode, v);
3363 /* Recognize the identity. */
3364 if (GET_MODE (trueop0) == mode)
3366 bool maybe_ident = true;
3367 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3369 rtx j = XVECEXP (trueop1, 0, i);
3370 if (!CONST_INT_P (j) || INTVAL (j) != i)
3372 maybe_ident = false;
3373 break;
3376 if (maybe_ident)
3377 return trueop0;
3380 /* If we build {a,b} then permute it, build the result directly. */
3381 if (XVECLEN (trueop1, 0) == 2
3382 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3383 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3384 && GET_CODE (trueop0) == VEC_CONCAT
3385 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3386 && GET_MODE (XEXP (trueop0, 0)) == mode
3387 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3388 && GET_MODE (XEXP (trueop0, 1)) == mode)
3390 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3391 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3392 rtx subop0, subop1;
3394 gcc_assert (i0 < 4 && i1 < 4);
3395 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3396 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3398 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3401 if (XVECLEN (trueop1, 0) == 2
3402 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3403 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3404 && GET_CODE (trueop0) == VEC_CONCAT
3405 && GET_MODE (trueop0) == mode)
3407 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3408 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3409 rtx subop0, subop1;
3411 gcc_assert (i0 < 2 && i1 < 2);
3412 subop0 = XEXP (trueop0, i0);
3413 subop1 = XEXP (trueop0, i1);
3415 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3418 /* If we select one half of a vec_concat, return that. */
3419 if (GET_CODE (trueop0) == VEC_CONCAT
3420 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3422 rtx subop0 = XEXP (trueop0, 0);
3423 rtx subop1 = XEXP (trueop0, 1);
3424 machine_mode mode0 = GET_MODE (subop0);
3425 machine_mode mode1 = GET_MODE (subop1);
3426 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3427 int l0 = GET_MODE_SIZE (mode0) / li;
3428 int l1 = GET_MODE_SIZE (mode1) / li;
3429 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3430 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3432 bool success = true;
3433 for (int i = 1; i < l0; ++i)
3435 rtx j = XVECEXP (trueop1, 0, i);
3436 if (!CONST_INT_P (j) || INTVAL (j) != i)
3438 success = false;
3439 break;
3442 if (success)
3443 return subop0;
3445 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3447 bool success = true;
3448 for (int i = 1; i < l1; ++i)
3450 rtx j = XVECEXP (trueop1, 0, i);
3451 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3453 success = false;
3454 break;
3457 if (success)
3458 return subop1;
3463 if (XVECLEN (trueop1, 0) == 1
3464 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3465 && GET_CODE (trueop0) == VEC_CONCAT)
3467 rtx vec = trueop0;
3468 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3470 /* Try to find the element in the VEC_CONCAT. */
3471 while (GET_MODE (vec) != mode
3472 && GET_CODE (vec) == VEC_CONCAT)
3474 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3475 if (offset < vec_size)
3476 vec = XEXP (vec, 0);
3477 else
3479 offset -= vec_size;
3480 vec = XEXP (vec, 1);
3482 vec = avoid_constant_pool_reference (vec);
3485 if (GET_MODE (vec) == mode)
3486 return vec;
3489 /* If we select elements in a vec_merge that all come from the same
3490 operand, select from that operand directly. */
3491 if (GET_CODE (op0) == VEC_MERGE)
3493 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3494 if (CONST_INT_P (trueop02))
3496 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3497 bool all_operand0 = true;
3498 bool all_operand1 = true;
3499 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3501 rtx j = XVECEXP (trueop1, 0, i);
3502 if (sel & (1 << UINTVAL (j)))
3503 all_operand1 = false;
3504 else
3505 all_operand0 = false;
3507 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3508 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3509 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3510 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3514 /* If we have two nested selects that are inverses of each
3515 other, replace them with the source operand. */
3516 if (GET_CODE (trueop0) == VEC_SELECT
3517 && GET_MODE (XEXP (trueop0, 0)) == mode)
3519 rtx op0_subop1 = XEXP (trueop0, 1);
3520 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3521 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3523 /* Apply the outer ordering vector to the inner one. (The inner
3524 ordering vector is expressly permitted to be of a different
3525 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3526 then the two VEC_SELECTs cancel. */
3527 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3529 rtx x = XVECEXP (trueop1, 0, i);
3530 if (!CONST_INT_P (x))
3531 return 0;
3532 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3533 if (!CONST_INT_P (y) || i != INTVAL (y))
3534 return 0;
3536 return XEXP (trueop0, 0);
3539 return 0;
3540 case VEC_CONCAT:
3542 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3543 ? GET_MODE (trueop0)
3544 : GET_MODE_INNER (mode));
3545 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3546 ? GET_MODE (trueop1)
3547 : GET_MODE_INNER (mode));
3549 gcc_assert (VECTOR_MODE_P (mode));
3550 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3551 == GET_MODE_SIZE (mode));
3553 if (VECTOR_MODE_P (op0_mode))
3554 gcc_assert (GET_MODE_INNER (mode)
3555 == GET_MODE_INNER (op0_mode));
3556 else
3557 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3559 if (VECTOR_MODE_P (op1_mode))
3560 gcc_assert (GET_MODE_INNER (mode)
3561 == GET_MODE_INNER (op1_mode));
3562 else
3563 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3565 if ((GET_CODE (trueop0) == CONST_VECTOR
3566 || CONST_SCALAR_INT_P (trueop0)
3567 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3568 && (GET_CODE (trueop1) == CONST_VECTOR
3569 || CONST_SCALAR_INT_P (trueop1)
3570 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3572 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3573 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3574 rtvec v = rtvec_alloc (n_elts);
3575 unsigned int i;
3576 unsigned in_n_elts = 1;
3578 if (VECTOR_MODE_P (op0_mode))
3579 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3580 for (i = 0; i < n_elts; i++)
3582 if (i < in_n_elts)
3584 if (!VECTOR_MODE_P (op0_mode))
3585 RTVEC_ELT (v, i) = trueop0;
3586 else
3587 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3589 else
3591 if (!VECTOR_MODE_P (op1_mode))
3592 RTVEC_ELT (v, i) = trueop1;
3593 else
3594 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3595 i - in_n_elts);
3599 return gen_rtx_CONST_VECTOR (mode, v);
3602 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3603 Restrict the transformation to avoid generating a VEC_SELECT with a
3604 mode unrelated to its operand. */
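/* For example, (vec_concat:V2SI (vec_select:SI X (parallel [(const_int 1)]))
   (vec_select:SI X (parallel [(const_int 0)]))) becomes
   (vec_select:V2SI X (parallel [(const_int 1) (const_int 0)]))
   when X itself has mode V2SI.  */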
3605 if (GET_CODE (trueop0) == VEC_SELECT
3606 && GET_CODE (trueop1) == VEC_SELECT
3607 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3608 && GET_MODE (XEXP (trueop0, 0)) == mode)
3610 rtx par0 = XEXP (trueop0, 1);
3611 rtx par1 = XEXP (trueop1, 1);
3612 int len0 = XVECLEN (par0, 0);
3613 int len1 = XVECLEN (par1, 0);
3614 rtvec vec = rtvec_alloc (len0 + len1);
3615 for (int i = 0; i < len0; i++)
3616 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3617 for (int i = 0; i < len1; i++)
3618 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3619 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3620 gen_rtx_PARALLEL (VOIDmode, vec));
3623 return 0;
3625 default:
3626 gcc_unreachable ();
3629 return 0;
3632 rtx
3633 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3634 rtx op0, rtx op1)
3636 unsigned int width = GET_MODE_PRECISION (mode);
3638 if (VECTOR_MODE_P (mode)
3639 && code != VEC_CONCAT
3640 && GET_CODE (op0) == CONST_VECTOR
3641 && GET_CODE (op1) == CONST_VECTOR)
3643 unsigned n_elts = GET_MODE_NUNITS (mode);
3644 machine_mode op0mode = GET_MODE (op0);
3645 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3646 machine_mode op1mode = GET_MODE (op1);
3647 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3648 rtvec v = rtvec_alloc (n_elts);
3649 unsigned int i;
3651 gcc_assert (op0_n_elts == n_elts);
3652 gcc_assert (op1_n_elts == n_elts);
3653 for (i = 0; i < n_elts; i++)
3655 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3656 CONST_VECTOR_ELT (op0, i),
3657 CONST_VECTOR_ELT (op1, i));
3658 if (!x)
3659 return 0;
3660 RTVEC_ELT (v, i) = x;
3663 return gen_rtx_CONST_VECTOR (mode, v);
3666 if (VECTOR_MODE_P (mode)
3667 && code == VEC_CONCAT
3668 && (CONST_SCALAR_INT_P (op0)
3669 || GET_CODE (op0) == CONST_FIXED
3670 || CONST_DOUBLE_AS_FLOAT_P (op0))
3671 && (CONST_SCALAR_INT_P (op1)
3672 || CONST_DOUBLE_AS_FLOAT_P (op1)
3673 || GET_CODE (op1) == CONST_FIXED))
3675 unsigned n_elts = GET_MODE_NUNITS (mode);
3676 rtvec v = rtvec_alloc (n_elts);
3678 gcc_assert (n_elts >= 2);
3679 if (n_elts == 2)
3681 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3682 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3684 RTVEC_ELT (v, 0) = op0;
3685 RTVEC_ELT (v, 1) = op1;
3687 else
3689 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3690 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3691 unsigned i;
3693 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3694 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3695 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3697 for (i = 0; i < op0_n_elts; ++i)
3698 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3699 for (i = 0; i < op1_n_elts; ++i)
3700 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3703 return gen_rtx_CONST_VECTOR (mode, v);
3706 if (SCALAR_FLOAT_MODE_P (mode)
3707 && CONST_DOUBLE_AS_FLOAT_P (op0)
3708 && CONST_DOUBLE_AS_FLOAT_P (op1)
3709 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3711 if (code == AND
3712 || code == IOR
3713 || code == XOR)
3715 long tmp0[4];
3716 long tmp1[4];
3717 REAL_VALUE_TYPE r;
3718 int i;
3720 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3721 GET_MODE (op0));
3722 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3723 GET_MODE (op1));
3724 for (i = 0; i < 4; i++)
3726 switch (code)
3728 case AND:
3729 tmp0[i] &= tmp1[i];
3730 break;
3731 case IOR:
3732 tmp0[i] |= tmp1[i];
3733 break;
3734 case XOR:
3735 tmp0[i] ^= tmp1[i];
3736 break;
3737 default:
3738 gcc_unreachable ();
3741 real_from_target (&r, tmp0, mode);
3742 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3744 else
3746 REAL_VALUE_TYPE f0, f1, value, result;
3747 bool inexact;
3749 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3750 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3751 real_convert (&f0, mode, &f0);
3752 real_convert (&f1, mode, &f1);
3754 if (HONOR_SNANS (mode)
3755 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3756 return 0;
3758 if (code == DIV
3759 && REAL_VALUES_EQUAL (f1, dconst0)
3760 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3761 return 0;
3763 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3764 && flag_trapping_math
3765 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3767 int s0 = REAL_VALUE_NEGATIVE (f0);
3768 int s1 = REAL_VALUE_NEGATIVE (f1);
3770 switch (code)
3772 case PLUS:
3773 /* Inf + -Inf = NaN plus exception. */
3774 if (s0 != s1)
3775 return 0;
3776 break;
3777 case MINUS:
3778 /* Inf - Inf = NaN plus exception. */
3779 if (s0 == s1)
3780 return 0;
3781 break;
3782 case DIV:
3783 /* Inf / Inf = NaN plus exception. */
3784 return 0;
3785 default:
3786 break;
3790 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3791 && flag_trapping_math
3792 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3793 || (REAL_VALUE_ISINF (f1)
3794 && REAL_VALUES_EQUAL (f0, dconst0))))
3795 /* Inf * 0 = NaN plus exception. */
3796 return 0;
3798 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3799 &f0, &f1);
3800 real_convert (&result, mode, &value);
3802 /* Don't constant fold this floating point operation if
3803 the result has overflowed and flag_trapping_math is set. */
3805 if (flag_trapping_math
3806 && MODE_HAS_INFINITIES (mode)
3807 && REAL_VALUE_ISINF (result)
3808 && !REAL_VALUE_ISINF (f0)
3809 && !REAL_VALUE_ISINF (f1))
3810 /* Overflow plus exception. */
3811 return 0;
3813 /* Don't constant fold this floating point operation if the
3814 result may depend upon the run-time rounding mode and
3815 flag_rounding_math is set, or if GCC's software emulation
3816 is unable to accurately represent the result. */
3818 if ((flag_rounding_math
3819 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3820 && (inexact || !real_identical (&result, &value)))
3821 return NULL_RTX;
3823 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3827 /* We can fold some multi-word operations. */
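/* The operands are converted to wide_int values at the result mode's
   precision, so e.g. an addition of two CONST_WIDE_INTs in a double-word
   mode is folded here and returned via immed_wide_int_const even though
   the value does not fit in a single HOST_WIDE_INT.  */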
3828 if ((GET_MODE_CLASS (mode) == MODE_INT
3829 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3830 && CONST_SCALAR_INT_P (op0)
3831 && CONST_SCALAR_INT_P (op1))
3833 wide_int result;
3834 bool overflow;
3835 rtx_mode_t pop0 = std::make_pair (op0, mode);
3836 rtx_mode_t pop1 = std::make_pair (op1, mode);
3838 #if TARGET_SUPPORTS_WIDE_INT == 0
3839 /* This assert keeps the simplification from producing a result
3840 that cannot be represented in a CONST_DOUBLE, but a lot of
3841 upstream callers expect that this function never fails to
3842 simplify something, so if you added this to the test
3843 above, the code would die later anyway. If this assert
3844 fires, you just need to make the port support wide int. */
3845 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3846 #endif
3847 switch (code)
3849 case MINUS:
3850 result = wi::sub (pop0, pop1);
3851 break;
3853 case PLUS:
3854 result = wi::add (pop0, pop1);
3855 break;
3857 case MULT:
3858 result = wi::mul (pop0, pop1);
3859 break;
3861 case DIV:
3862 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3863 if (overflow)
3864 return NULL_RTX;
3865 break;
3867 case MOD:
3868 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3869 if (overflow)
3870 return NULL_RTX;
3871 break;
3873 case UDIV:
3874 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3875 if (overflow)
3876 return NULL_RTX;
3877 break;
3879 case UMOD:
3880 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3881 if (overflow)
3882 return NULL_RTX;
3883 break;
3885 case AND:
3886 result = wi::bit_and (pop0, pop1);
3887 break;
3889 case IOR:
3890 result = wi::bit_or (pop0, pop1);
3891 break;
3893 case XOR:
3894 result = wi::bit_xor (pop0, pop1);
3895 break;
3897 case SMIN:
3898 result = wi::smin (pop0, pop1);
3899 break;
3901 case SMAX:
3902 result = wi::smax (pop0, pop1);
3903 break;
3905 case UMIN:
3906 result = wi::umin (pop0, pop1);
3907 break;
3909 case UMAX:
3910 result = wi::umax (pop0, pop1);
3911 break;
3913 case LSHIFTRT:
3914 case ASHIFTRT:
3915 case ASHIFT:
3917 wide_int wop1 = pop1;
3918 if (SHIFT_COUNT_TRUNCATED)
3919 wop1 = wi::umod_trunc (wop1, width);
3920 else if (wi::geu_p (wop1, width))
3921 return NULL_RTX;
3923 switch (code)
3925 case LSHIFTRT:
3926 result = wi::lrshift (pop0, wop1);
3927 break;
3929 case ASHIFTRT:
3930 result = wi::arshift (pop0, wop1);
3931 break;
3933 case ASHIFT:
3934 result = wi::lshift (pop0, wop1);
3935 break;
3937 default:
3938 gcc_unreachable ();
3940 break;
3942 case ROTATE:
3943 case ROTATERT:
3945 if (wi::neg_p (pop1))
3946 return NULL_RTX;
3948 switch (code)
3950 case ROTATE:
3951 result = wi::lrotate (pop0, pop1);
3952 break;
3954 case ROTATERT:
3955 result = wi::rrotate (pop0, pop1);
3956 break;
3958 default:
3959 gcc_unreachable ();
3961 break;
3963 default:
3964 return NULL_RTX;
3966 return immed_wide_int_const (result, mode);
3969 return NULL_RTX;
3974 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3975 PLUS or MINUS.
3977 Rather than test for specific cases, we do this by a brute-force method
3978 and do all possible simplifications until no more changes occur. Then
3979 we rebuild the operation. */
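/* For example, (minus (plus A B) C) is first flattened into the ops array
   as A, B and a negated C; any pair of entries that simplifies is merged,
   and the surviving operands are re-associated into a chain of PLUS and
   MINUS expressions.  */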
3981 struct simplify_plus_minus_op_data
3983 rtx op;
3984 short neg;
3987 static bool
3988 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3990 int result;
3992 result = (commutative_operand_precedence (y)
3993 - commutative_operand_precedence (x));
3994 if (result)
3995 return result > 0;
3997 /* Group together equal REGs to do more simplification. */
3998 if (REG_P (x) && REG_P (y))
3999 return REGNO (x) > REGNO (y);
4000 else
4001 return false;
4004 static rtx
4005 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4006 rtx op1)
4008 struct simplify_plus_minus_op_data ops[16];
4009 rtx result, tem;
4010 int n_ops = 2;
4011 int changed, n_constants, canonicalized = 0;
4012 int i, j;
4014 memset (ops, 0, sizeof ops);
4016 /* Set up the two operands and then expand them until nothing has been
4017 changed. If we run out of room in our array, give up; this should
4018 almost never happen. */
4020 ops[0].op = op0;
4021 ops[0].neg = 0;
4022 ops[1].op = op1;
4023 ops[1].neg = (code == MINUS);
4027 changed = 0;
4028 n_constants = 0;
4030 for (i = 0; i < n_ops; i++)
4032 rtx this_op = ops[i].op;
4033 int this_neg = ops[i].neg;
4034 enum rtx_code this_code = GET_CODE (this_op);
4036 switch (this_code)
4038 case PLUS:
4039 case MINUS:
4040 if (n_ops == ARRAY_SIZE (ops))
4041 return NULL_RTX;
4043 ops[n_ops].op = XEXP (this_op, 1);
4044 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4045 n_ops++;
4047 ops[i].op = XEXP (this_op, 0);
4048 changed = 1;
4049 canonicalized |= this_neg || i != n_ops - 2;
4050 break;
4052 case NEG:
4053 ops[i].op = XEXP (this_op, 0);
4054 ops[i].neg = ! this_neg;
4055 changed = 1;
4056 canonicalized = 1;
4057 break;
4059 case CONST:
4060 if (n_ops != ARRAY_SIZE (ops)
4061 && GET_CODE (XEXP (this_op, 0)) == PLUS
4062 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4063 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4065 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4066 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4067 ops[n_ops].neg = this_neg;
4068 n_ops++;
4069 changed = 1;
4070 canonicalized = 1;
4072 break;
4074 case NOT:
4075 /* ~a -> (-a - 1) */
4076 if (n_ops != ARRAY_SIZE (ops))
4078 ops[n_ops].op = CONSTM1_RTX (mode);
4079 ops[n_ops++].neg = this_neg;
4080 ops[i].op = XEXP (this_op, 0);
4081 ops[i].neg = !this_neg;
4082 changed = 1;
4083 canonicalized = 1;
4085 break;
4087 case CONST_INT:
4088 n_constants++;
4089 if (this_neg)
4091 ops[i].op = neg_const_int (mode, this_op);
4092 ops[i].neg = 0;
4093 changed = 1;
4094 canonicalized = 1;
4096 break;
4098 default:
4099 break;
4103 while (changed);
4105 if (n_constants > 1)
4106 canonicalized = 1;
4108 gcc_assert (n_ops >= 2);
4110 /* If we only have two operands, we can avoid the loops. */
4111 if (n_ops == 2)
4113 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4114 rtx lhs, rhs;
4116 /* Get the two operands. Be careful with the order, especially for
4117 the cases where code == MINUS. */
4118 if (ops[0].neg && ops[1].neg)
4120 lhs = gen_rtx_NEG (mode, ops[0].op);
4121 rhs = ops[1].op;
4123 else if (ops[0].neg)
4125 lhs = ops[1].op;
4126 rhs = ops[0].op;
4128 else
4130 lhs = ops[0].op;
4131 rhs = ops[1].op;
4134 return simplify_const_binary_operation (code, mode, lhs, rhs);
4137 /* Now simplify each pair of operands until nothing changes. */
4140 /* Insertion sort is good enough for a small array. */
4141 for (i = 1; i < n_ops; i++)
4143 struct simplify_plus_minus_op_data save;
4144 j = i - 1;
4145 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4146 continue;
4148 canonicalized = 1;
4149 save = ops[i];
4151 ops[j + 1] = ops[j];
4152 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4153 ops[j + 1] = save;
4156 changed = 0;
4157 for (i = n_ops - 1; i > 0; i--)
4158 for (j = i - 1; j >= 0; j--)
4160 rtx lhs = ops[j].op, rhs = ops[i].op;
4161 int lneg = ops[j].neg, rneg = ops[i].neg;
4163 if (lhs != 0 && rhs != 0)
4165 enum rtx_code ncode = PLUS;
4167 if (lneg != rneg)
4169 ncode = MINUS;
4170 if (lneg)
4171 tem = lhs, lhs = rhs, rhs = tem;
4173 else if (swap_commutative_operands_p (lhs, rhs))
4174 tem = lhs, lhs = rhs, rhs = tem;
4176 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4177 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4179 rtx tem_lhs, tem_rhs;
4181 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4182 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4183 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4185 if (tem && !CONSTANT_P (tem))
4186 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4188 else
4189 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4191 if (tem)
4193 /* Reject "simplifications" that just wrap the two
4194 arguments in a CONST. Failure to do so can result
4195 in infinite recursion with simplify_binary_operation
4196 when it calls us to simplify CONST operations.
4197 Also, if we find such a simplification, don't try
4198 any more combinations with this rhs: We must have
4199 something like symbol+offset, i.e. one of the
4200 trivial CONST expressions we handle later. */
4201 if (GET_CODE (tem) == CONST
4202 && GET_CODE (XEXP (tem, 0)) == ncode
4203 && XEXP (XEXP (tem, 0), 0) == lhs
4204 && XEXP (XEXP (tem, 0), 1) == rhs)
4205 break;
4206 lneg &= rneg;
4207 if (GET_CODE (tem) == NEG)
4208 tem = XEXP (tem, 0), lneg = !lneg;
4209 if (CONST_INT_P (tem) && lneg)
4210 tem = neg_const_int (mode, tem), lneg = 0;
4212 ops[i].op = tem;
4213 ops[i].neg = lneg;
4214 ops[j].op = NULL_RTX;
4215 changed = 1;
4216 canonicalized = 1;
4221 /* If nothing changed, fail. */
4222 if (!canonicalized)
4223 return NULL_RTX;
4225 /* Pack all the operands to the lower-numbered entries. */
4226 for (i = 0, j = 0; j < n_ops; j++)
4227 if (ops[j].op)
4229 ops[i] = ops[j];
4230 i++;
4232 n_ops = i;
4234 while (changed);
4236 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4237 if (n_ops == 2
4238 && CONST_INT_P (ops[1].op)
4239 && CONSTANT_P (ops[0].op)
4240 && ops[0].neg)
4241 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4243 /* We suppressed creation of trivial CONST expressions in the
4244 combination loop to avoid recursion. Create one manually now.
4245 The combination loop should have ensured that there is exactly
4246 one CONST_INT, and the sort will have ensured that it is last
4247 in the array and that any other constant will be next-to-last. */
4249 if (n_ops > 1
4250 && CONST_INT_P (ops[n_ops - 1].op)
4251 && CONSTANT_P (ops[n_ops - 2].op))
4253 rtx value = ops[n_ops - 1].op;
4254 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4255 value = neg_const_int (mode, value);
4256 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4257 INTVAL (value));
4258 n_ops--;
4261 /* Put a non-negated operand first, if possible. */
4263 for (i = 0; i < n_ops && ops[i].neg; i++)
4264 continue;
4265 if (i == n_ops)
4266 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4267 else if (i != 0)
4269 tem = ops[0].op;
4270 ops[0] = ops[i];
4271 ops[i].op = tem;
4272 ops[i].neg = 1;
4275 /* Now make the result by performing the requested operations. */
4276 result = ops[0].op;
4277 for (i = 1; i < n_ops; i++)
4278 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4279 mode, result, ops[i].op);
4281 return result;
4284 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4285 static bool
4286 plus_minus_operand_p (const_rtx x)
4288 return GET_CODE (x) == PLUS
4289 || GET_CODE (x) == MINUS
4290 || (GET_CODE (x) == CONST
4291 && GET_CODE (XEXP (x, 0)) == PLUS
4292 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4293 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4296 /* Like simplify_binary_operation except used for relational operators.
4297 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4298 not also be VOIDmode.
4300 CMP_MODE specifies the mode in which the comparison is done, so it is
4301 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4302 the operands or, if both are VOIDmode, the operands are compared in
4303 "infinite precision". */
4305 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4306 machine_mode cmp_mode, rtx op0, rtx op1)
4308 rtx tem, trueop0, trueop1;
4310 if (cmp_mode == VOIDmode)
4311 cmp_mode = GET_MODE (op0);
4312 if (cmp_mode == VOIDmode)
4313 cmp_mode = GET_MODE (op1);
4315 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4316 if (tem)
4318 if (SCALAR_FLOAT_MODE_P (mode))
4320 if (tem == const0_rtx)
4321 return CONST0_RTX (mode);
4322 #ifdef FLOAT_STORE_FLAG_VALUE
4324 REAL_VALUE_TYPE val;
4325 val = FLOAT_STORE_FLAG_VALUE (mode);
4326 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4328 #else
4329 return NULL_RTX;
4330 #endif
4332 if (VECTOR_MODE_P (mode))
4334 if (tem == const0_rtx)
4335 return CONST0_RTX (mode);
4336 #ifdef VECTOR_STORE_FLAG_VALUE
4338 int i, units;
4339 rtvec v;
4341 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4342 if (val == NULL_RTX)
4343 return NULL_RTX;
4344 if (val == const1_rtx)
4345 return CONST1_RTX (mode);
4347 units = GET_MODE_NUNITS (mode);
4348 v = rtvec_alloc (units);
4349 for (i = 0; i < units; i++)
4350 RTVEC_ELT (v, i) = val;
4351 return gen_rtx_raw_CONST_VECTOR (mode, v);
4353 #else
4354 return NULL_RTX;
4355 #endif
4358 return tem;
4361 /* For the following tests, ensure const0_rtx is op1. */
4362 if (swap_commutative_operands_p (op0, op1)
4363 || (op0 == const0_rtx && op1 != const0_rtx))
4364 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4366 /* If op0 is a compare, extract the comparison arguments from it. */
4367 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4368 return simplify_gen_relational (code, mode, VOIDmode,
4369 XEXP (op0, 0), XEXP (op0, 1));
4371 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4372 || CC0_P (op0))
4373 return NULL_RTX;
4375 trueop0 = avoid_constant_pool_reference (op0);
4376 trueop1 = avoid_constant_pool_reference (op1);
4377 return simplify_relational_operation_1 (code, mode, cmp_mode,
4378 trueop0, trueop1);
4381 /* This part of simplify_relational_operation is only used when CMP_MODE
4382 is not in class MODE_CC (i.e. it is a real comparison).
4384 MODE is the mode of the result, while CMP_MODE specifies the mode
4385 in which the comparison is done, so it is the mode of the operands. */
4387 static rtx
4388 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4389 machine_mode cmp_mode, rtx op0, rtx op1)
4391 enum rtx_code op0code = GET_CODE (op0);
4393 if (op1 == const0_rtx && COMPARISON_P (op0))
4395 /* If op0 is a comparison, extract the comparison arguments
4396 from it. */
4397 if (code == NE)
4399 if (GET_MODE (op0) == mode)
4400 return simplify_rtx (op0);
4401 else
4402 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4403 XEXP (op0, 0), XEXP (op0, 1));
4405 else if (code == EQ)
4407 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4408 if (new_code != UNKNOWN)
4409 return simplify_gen_relational (new_code, mode, VOIDmode,
4410 XEXP (op0, 0), XEXP (op0, 1));
4414 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4415 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4416 if ((code == LTU || code == GEU)
4417 && GET_CODE (op0) == PLUS
4418 && CONST_INT_P (XEXP (op0, 1))
4419 && (rtx_equal_p (op1, XEXP (op0, 0))
4420 || rtx_equal_p (op1, XEXP (op0, 1)))
4421 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4422 && XEXP (op0, 1) != const0_rtx)
4424 rtx new_cmp
4425 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4426 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4427 cmp_mode, XEXP (op0, 0), new_cmp);
4430 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4431 if ((code == LTU || code == GEU)
4432 && GET_CODE (op0) == PLUS
4433 && rtx_equal_p (op1, XEXP (op0, 1))
4434 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4435 && !rtx_equal_p (op1, XEXP (op0, 0)))
4436 return simplify_gen_relational (code, mode, cmp_mode, op0,
4437 copy_rtx (XEXP (op0, 0)));
4439 if (op1 == const0_rtx)
4441 /* Canonicalize (GTU x 0) as (NE x 0). */
4442 if (code == GTU)
4443 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4444 /* Canonicalize (LEU x 0) as (EQ x 0). */
4445 if (code == LEU)
4446 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4448 else if (op1 == const1_rtx)
4450 switch (code)
4452 case GE:
4453 /* Canonicalize (GE x 1) as (GT x 0). */
4454 return simplify_gen_relational (GT, mode, cmp_mode,
4455 op0, const0_rtx);
4456 case GEU:
4457 /* Canonicalize (GEU x 1) as (NE x 0). */
4458 return simplify_gen_relational (NE, mode, cmp_mode,
4459 op0, const0_rtx);
4460 case LT:
4461 /* Canonicalize (LT x 1) as (LE x 0). */
4462 return simplify_gen_relational (LE, mode, cmp_mode,
4463 op0, const0_rtx);
4464 case LTU:
4465 /* Canonicalize (LTU x 1) as (EQ x 0). */
4466 return simplify_gen_relational (EQ, mode, cmp_mode,
4467 op0, const0_rtx);
4468 default:
4469 break;
4472 else if (op1 == constm1_rtx)
4474 /* Canonicalize (LE x -1) as (LT x 0). */
4475 if (code == LE)
4476 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4477 /* Canonicalize (GT x -1) as (GE x 0). */
4478 if (code == GT)
4479 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4482 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4483 if ((code == EQ || code == NE)
4484 && (op0code == PLUS || op0code == MINUS)
4485 && CONSTANT_P (op1)
4486 && CONSTANT_P (XEXP (op0, 1))
4487 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4489 rtx x = XEXP (op0, 0);
4490 rtx c = XEXP (op0, 1);
4491 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4492 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4494 /* Detect an infinite recursion, in which this simplification
4495 would keep oscillating between:
4496 A + B == C <---> C - B == A,
4497 where A, B, and C are all non-simplifiable constant expressions,
4498 usually SYMBOL_REFs. */
4499 if (GET_CODE (tem) == invcode
4500 && CONSTANT_P (x)
4501 && rtx_equal_p (c, XEXP (tem, 1)))
4502 return NULL_RTX;
4504 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4507 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4508 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4509 if (code == NE
4510 && op1 == const0_rtx
4511 && GET_MODE_CLASS (mode) == MODE_INT
4512 && cmp_mode != VOIDmode
4513 /* ??? Work-around BImode bugs in the ia64 backend. */
4514 && mode != BImode
4515 && cmp_mode != BImode
4516 && nonzero_bits (op0, cmp_mode) == 1
4517 && STORE_FLAG_VALUE == 1)
4518 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4519 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4520 : lowpart_subreg (mode, op0, cmp_mode);
4522 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4523 if ((code == EQ || code == NE)
4524 && op1 == const0_rtx
4525 && op0code == XOR)
4526 return simplify_gen_relational (code, mode, cmp_mode,
4527 XEXP (op0, 0), XEXP (op0, 1));
4529 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4530 if ((code == EQ || code == NE)
4531 && op0code == XOR
4532 && rtx_equal_p (XEXP (op0, 0), op1)
4533 && !side_effects_p (XEXP (op0, 0)))
4534 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4535 CONST0_RTX (mode));
4537 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4538 if ((code == EQ || code == NE)
4539 && op0code == XOR
4540 && rtx_equal_p (XEXP (op0, 1), op1)
4541 && !side_effects_p (XEXP (op0, 1)))
4542 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4543 CONST0_RTX (mode));
4545 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4546 if ((code == EQ || code == NE)
4547 && op0code == XOR
4548 && CONST_SCALAR_INT_P (op1)
4549 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4550 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4551 simplify_gen_binary (XOR, cmp_mode,
4552 XEXP (op0, 1), op1));
4554 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4555 if ((code == EQ || code == NE)
4556 && GET_CODE (op0) == BSWAP
4557 && CONST_SCALAR_INT_P (op1))
4558 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4559 simplify_gen_unary (BSWAP, cmp_mode,
4560 op1, cmp_mode));
4562 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4563 if ((code == EQ || code == NE)
4564 && GET_CODE (op0) == BSWAP
4565 && GET_CODE (op1) == BSWAP)
4566 return simplify_gen_relational (code, mode, cmp_mode,
4567 XEXP (op0, 0), XEXP (op1, 0));
4569 if (op0code == POPCOUNT && op1 == const0_rtx)
4570 switch (code)
4572 case EQ:
4573 case LE:
4574 case LEU:
4575 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4576 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4577 XEXP (op0, 0), const0_rtx);
4579 case NE:
4580 case GT:
4581 case GTU:
4582 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4583 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4584 XEXP (op0, 0), const0_rtx);
4586 default:
4587 break;
4590 return NULL_RTX;
4593 enum
4595 CMP_EQ = 1,
4596 CMP_LT = 2,
4597 CMP_GT = 4,
4598 CMP_LTU = 8,
4599 CMP_GTU = 16
4603 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4604 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4605 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4606 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4607 For floating-point comparisons, assume that the operands were ordered. */
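/* For instance, a KNOWN_RESULT of (CMP_GT | CMP_LTU) describes operands
   where op0 > op1 as signed values but op0 < op1 as unsigned values;
   comparison_result then maps the requested CODE to const_true_rtx or
   const0_rtx accordingly.  */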
4609 static rtx
4610 comparison_result (enum rtx_code code, int known_results)
4612 switch (code)
4614 case EQ:
4615 case UNEQ:
4616 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4617 case NE:
4618 case LTGT:
4619 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4621 case LT:
4622 case UNLT:
4623 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4624 case GE:
4625 case UNGE:
4626 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4628 case GT:
4629 case UNGT:
4630 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4631 case LE:
4632 case UNLE:
4633 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4635 case LTU:
4636 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4637 case GEU:
4638 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4640 case GTU:
4641 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4642 case LEU:
4643 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4645 case ORDERED:
4646 return const_true_rtx;
4647 case UNORDERED:
4648 return const0_rtx;
4649 default:
4650 gcc_unreachable ();
4654 /* Check if the given comparison (done in the given MODE) is actually
4655 a tautology or a contradiction. If the mode is VOIDmode, the
4656 comparison is done in "infinite precision". If no simplification
4657 is possible, this function returns zero. Otherwise, it returns
4658 either const_true_rtx or const0_rtx. */
4661 simplify_const_relational_operation (enum rtx_code code,
4662 machine_mode mode,
4663 rtx op0, rtx op1)
4665 rtx tem;
4666 rtx trueop0;
4667 rtx trueop1;
4669 gcc_assert (mode != VOIDmode
4670 || (GET_MODE (op0) == VOIDmode
4671 && GET_MODE (op1) == VOIDmode));
4673 /* If op0 is a compare, extract the comparison arguments from it. */
4674 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4676 op1 = XEXP (op0, 1);
4677 op0 = XEXP (op0, 0);
4679 if (GET_MODE (op0) != VOIDmode)
4680 mode = GET_MODE (op0);
4681 else if (GET_MODE (op1) != VOIDmode)
4682 mode = GET_MODE (op1);
4683 else
4684 return 0;
4687 /* We can't simplify MODE_CC values since we don't know what the
4688 actual comparison is. */
4689 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4690 return 0;
4692 /* Make sure the constant is second. */
4693 if (swap_commutative_operands_p (op0, op1))
4695 tem = op0, op0 = op1, op1 = tem;
4696 code = swap_condition (code);
4699 trueop0 = avoid_constant_pool_reference (op0);
4700 trueop1 = avoid_constant_pool_reference (op1);
4702 /* For integer comparisons of A and B maybe we can simplify A - B and can
4703 then simplify a comparison of that with zero. If A and B are both either
4704 a register or a CONST_INT, this can't help; testing for these cases will
4705 prevent infinite recursion here and speed things up.
4707 We can only do this for EQ and NE comparisons as otherwise we may
4708 lose or introduce overflow which we cannot disregard as undefined, since
4709 we do not know the signedness of the operation on either the left or
4710 the right hand side of the comparison. */
4712 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4713 && (code == EQ || code == NE)
4714 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4715 && (REG_P (op1) || CONST_INT_P (trueop1)))
4716 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4717 /* We cannot do this if tem is a nonzero address. */
4718 && ! nonzero_address_p (tem))
4719 return simplify_const_relational_operation (signed_condition (code),
4720 mode, tem, const0_rtx);
4722 if (! HONOR_NANS (mode) && code == ORDERED)
4723 return const_true_rtx;
4725 if (! HONOR_NANS (mode) && code == UNORDERED)
4726 return const0_rtx;
4728 /* For modes without NaNs, if the two operands are equal, we know the
4729 result except if they have side-effects. Even with NaNs we know
4730 the result of unordered comparisons and, if signaling NaNs are
4731 irrelevant, also the result of LT/GT/LTGT. */
4732 if ((! HONOR_NANS (GET_MODE (trueop0))
4733 || code == UNEQ || code == UNLE || code == UNGE
4734 || ((code == LT || code == GT || code == LTGT)
4735 && ! HONOR_SNANS (GET_MODE (trueop0))))
4736 && rtx_equal_p (trueop0, trueop1)
4737 && ! side_effects_p (trueop0))
4738 return comparison_result (code, CMP_EQ);
4740 /* If the operands are floating-point constants, see if we can fold
4741 the result. */
4742 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4743 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4744 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4746 REAL_VALUE_TYPE d0, d1;
4748 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4749 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4751 /* Comparisons are unordered iff at least one of the values is NaN. */
4752 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4753 switch (code)
4755 case UNEQ:
4756 case UNLT:
4757 case UNGT:
4758 case UNLE:
4759 case UNGE:
4760 case NE:
4761 case UNORDERED:
4762 return const_true_rtx;
4763 case EQ:
4764 case LT:
4765 case GT:
4766 case LE:
4767 case GE:
4768 case LTGT:
4769 case ORDERED:
4770 return const0_rtx;
4771 default:
4772 return 0;
4775 return comparison_result (code,
4776 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4777 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4780 /* Otherwise, see if the operands are both integers. */
4781 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4782 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4784 /* It would be nice if we really had a mode here. However, the
4785 largest int representable on the target is as good as
4786 infinite. */
4787 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4788 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4789 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4791 if (wi::eq_p (ptrueop0, ptrueop1))
4792 return comparison_result (code, CMP_EQ);
4793 else
4795 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4796 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4797 return comparison_result (code, cr);
4801 /* Optimize comparisons with upper and lower bounds. */
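/* For instance, if nonzero_bits shows that TRUEOP0 fits in 8 bits (so
   mmin = 0 and mmax = 255), then (gtu x (const_int 255)) is known to be
   false and (leu x (const_int 255)) is known to be true.  */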
4802 if (HWI_COMPUTABLE_MODE_P (mode)
4803 && CONST_INT_P (trueop1))
4805 int sign;
4806 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4807 HOST_WIDE_INT val = INTVAL (trueop1);
4808 HOST_WIDE_INT mmin, mmax;
4810 if (code == GEU
4811 || code == LEU
4812 || code == GTU
4813 || code == LTU)
4814 sign = 0;
4815 else
4816 sign = 1;
4818 /* Get a reduced range if the sign bit is zero. */
4819 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4821 mmin = 0;
4822 mmax = nonzero;
4824 else
4826 rtx mmin_rtx, mmax_rtx;
4827 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4829 mmin = INTVAL (mmin_rtx);
4830 mmax = INTVAL (mmax_rtx);
4831 if (sign)
4833 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4835 mmin >>= (sign_copies - 1);
4836 mmax >>= (sign_copies - 1);
4840 switch (code)
4842 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4843 case GEU:
4844 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4845 return const_true_rtx;
4846 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4847 return const0_rtx;
4848 break;
4849 case GE:
4850 if (val <= mmin)
4851 return const_true_rtx;
4852 if (val > mmax)
4853 return const0_rtx;
4854 break;
4856 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4857 case LEU:
4858 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4859 return const_true_rtx;
4860 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4861 return const0_rtx;
4862 break;
4863 case LE:
4864 if (val >= mmax)
4865 return const_true_rtx;
4866 if (val < mmin)
4867 return const0_rtx;
4868 break;
4870 case EQ:
4871 /* x == y is always false for y out of range. */
4872 if (val < mmin || val > mmax)
4873 return const0_rtx;
4874 break;
4876 /* x > y is always false for y >= mmax, always true for y < mmin. */
4877 case GTU:
4878 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4879 return const0_rtx;
4880 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4881 return const_true_rtx;
4882 break;
4883 case GT:
4884 if (val >= mmax)
4885 return const0_rtx;
4886 if (val < mmin)
4887 return const_true_rtx;
4888 break;
4890 /* x < y is always false for y <= mmin, always true for y > mmax. */
4891 case LTU:
4892 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4893 return const0_rtx;
4894 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4895 return const_true_rtx;
4896 break;
4897 case LT:
4898 if (val <= mmin)
4899 return const0_rtx;
4900 if (val > mmax)
4901 return const_true_rtx;
4902 break;
4904 case NE:
4905 /* x != y is always true for y out of range. */
4906 if (val < mmin || val > mmax)
4907 return const_true_rtx;
4908 break;
4910 default:
4911 break;
4915 /* Optimize integer comparisons with zero. */
4916 if (trueop1 == const0_rtx)
4918 /* Some addresses are known to be nonzero. We don't know
4919 their sign, but equality comparisons are known. */
4920 if (nonzero_address_p (trueop0))
4922 if (code == EQ || code == LEU)
4923 return const0_rtx;
4924 if (code == NE || code == GTU)
4925 return const_true_rtx;
4928 /* See if the first operand is an IOR with a constant. If so, we
4929 may be able to determine the result of this comparison. */
4930 if (GET_CODE (op0) == IOR)
4932 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4933 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4935 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4936 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4937 && (UINTVAL (inner_const)
4938 & ((unsigned HOST_WIDE_INT) 1
4939 << sign_bitnum)));
4941 switch (code)
4943 case EQ:
4944 case LEU:
4945 return const0_rtx;
4946 case NE:
4947 case GTU:
4948 return const_true_rtx;
4949 case LT:
4950 case LE:
4951 if (has_sign)
4952 return const_true_rtx;
4953 break;
4954 case GT:
4955 case GE:
4956 if (has_sign)
4957 return const0_rtx;
4958 break;
4959 default:
4960 break;
4966 /* Optimize comparison of ABS with zero. */
4967 if (trueop1 == CONST0_RTX (mode)
4968 && (GET_CODE (trueop0) == ABS
4969 || (GET_CODE (trueop0) == FLOAT_EXTEND
4970 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4972 switch (code)
4974 case LT:
4975 /* Optimize abs(x) < 0.0. */
4976 if (!HONOR_SNANS (mode)
4977 && (!INTEGRAL_MODE_P (mode)
4978 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4980 if (INTEGRAL_MODE_P (mode)
4981 && (issue_strict_overflow_warning
4982 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4983 warning (OPT_Wstrict_overflow,
4984 ("assuming signed overflow does not occur when "
4985 "assuming abs (x) < 0 is false"));
4986 return const0_rtx;
4988 break;
4990 case GE:
4991 /* Optimize abs(x) >= 0.0. */
4992 if (!HONOR_NANS (mode)
4993 && (!INTEGRAL_MODE_P (mode)
4994 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4996 if (INTEGRAL_MODE_P (mode)
4997 && (issue_strict_overflow_warning
4998 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4999 warning (OPT_Wstrict_overflow,
5000 ("assuming signed overflow does not occur when "
5001 "assuming abs (x) >= 0 is true"));
5002 return const_true_rtx;
5004 break;
5006 case UNGE:
5007 /* Optimize ! (abs(x) < 0.0). */
5008 return const_true_rtx;
5010 default:
5011 break;
5015 return 0;
5018 /* Simplify CODE, an operation with result mode MODE and three operands,
5019 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5020 a constant. Return 0 if no simplification is possible. */
5023 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5024 machine_mode op0_mode, rtx op0, rtx op1,
5025 rtx op2)
5027 unsigned int width = GET_MODE_PRECISION (mode);
5028 bool any_change = false;
5029 rtx tem, trueop2;
5031 /* VOIDmode means "infinite" precision. */
5032 if (width == 0)
5033 width = HOST_BITS_PER_WIDE_INT;
5035 switch (code)
5037 case FMA:
5038 /* Simplify negations around the multiplication. */
5039 /* -a * -b + c => a * b + c. */
5040 if (GET_CODE (op0) == NEG)
5042 tem = simplify_unary_operation (NEG, mode, op1, mode);
5043 if (tem)
5044 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5046 else if (GET_CODE (op1) == NEG)
5048 tem = simplify_unary_operation (NEG, mode, op0, mode);
5049 if (tem)
5050 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5053 /* Canonicalize the two multiplication operands. */
5054 /* a * -b + c => -b * a + c. */
5055 if (swap_commutative_operands_p (op0, op1))
5056 tem = op0, op0 = op1, op1 = tem, any_change = true;
5058 if (any_change)
5059 return gen_rtx_FMA (mode, op0, op1, op2);
5060 return NULL_RTX;
5062 case SIGN_EXTRACT:
5063 case ZERO_EXTRACT:
5064 if (CONST_INT_P (op0)
5065 && CONST_INT_P (op1)
5066 && CONST_INT_P (op2)
5067 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5068 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5070 /* Extracting a bit-field from a constant */
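/* E.g. on a !BITS_BIG_ENDIAN target, (zero_extract (const_int 0x1234)
   (const_int 4) (const_int 4)) shifts the value right by the bit position
   and masks it to the field width, yielding (const_int 3).  */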
5071 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5072 HOST_WIDE_INT op1val = INTVAL (op1);
5073 HOST_WIDE_INT op2val = INTVAL (op2);
5074 if (BITS_BIG_ENDIAN)
5075 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5076 else
5077 val >>= op2val;
5079 if (HOST_BITS_PER_WIDE_INT != op1val)
5081 /* First zero-extend. */
5082 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5083 /* If desired, propagate sign bit. */
5084 if (code == SIGN_EXTRACT
5085 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5086 != 0)
5087 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5090 return gen_int_mode (val, mode);
5092 break;
5094 case IF_THEN_ELSE:
5095 if (CONST_INT_P (op0))
5096 return op0 != const0_rtx ? op1 : op2;
5098 /* Convert c ? a : a into "a". */
5099 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5100 return op1;
5102 /* Convert a != b ? a : b into "a". */
5103 if (GET_CODE (op0) == NE
5104 && ! side_effects_p (op0)
5105 && ! HONOR_NANS (mode)
5106 && ! HONOR_SIGNED_ZEROS (mode)
5107 && ((rtx_equal_p (XEXP (op0, 0), op1)
5108 && rtx_equal_p (XEXP (op0, 1), op2))
5109 || (rtx_equal_p (XEXP (op0, 0), op2)
5110 && rtx_equal_p (XEXP (op0, 1), op1))))
5111 return op1;
5113 /* Convert a == b ? a : b into "b". */
5114 if (GET_CODE (op0) == EQ
5115 && ! side_effects_p (op0)
5116 && ! HONOR_NANS (mode)
5117 && ! HONOR_SIGNED_ZEROS (mode)
5118 && ((rtx_equal_p (XEXP (op0, 0), op1)
5119 && rtx_equal_p (XEXP (op0, 1), op2))
5120 || (rtx_equal_p (XEXP (op0, 0), op2)
5121 && rtx_equal_p (XEXP (op0, 1), op1))))
5122 return op2;
5124 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5126 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5127 ? GET_MODE (XEXP (op0, 1))
5128 : GET_MODE (XEXP (op0, 0)));
5129 rtx temp;
5131 /* Look for happy constants in op1 and op2. */
5132 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5134 HOST_WIDE_INT t = INTVAL (op1);
5135 HOST_WIDE_INT f = INTVAL (op2);
5137 if (t == STORE_FLAG_VALUE && f == 0)
5138 code = GET_CODE (op0);
5139 else if (t == 0 && f == STORE_FLAG_VALUE)
5141 enum rtx_code tmp;
5142 tmp = reversed_comparison_code (op0, NULL_RTX);
5143 if (tmp == UNKNOWN)
5144 break;
5145 code = tmp;
5147 else
5148 break;
5150 return simplify_gen_relational (code, mode, cmp_mode,
5151 XEXP (op0, 0), XEXP (op0, 1));
5154 if (cmp_mode == VOIDmode)
5155 cmp_mode = op0_mode;
5156 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5157 cmp_mode, XEXP (op0, 0),
5158 XEXP (op0, 1));
5160 /* See if any simplifications were possible. */
5161 if (temp)
5163 if (CONST_INT_P (temp))
5164 return temp == const0_rtx ? op2 : op1;
5165 else if (temp)
5166 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5169 break;
5171 case VEC_MERGE:
5172 gcc_assert (GET_MODE (op0) == mode);
5173 gcc_assert (GET_MODE (op1) == mode);
5174 gcc_assert (VECTOR_MODE_P (mode));
5175 trueop2 = avoid_constant_pool_reference (op2);
5176 if (CONST_INT_P (trueop2))
5178 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5179 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5180 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5181 unsigned HOST_WIDE_INT mask;
5182 if (n_elts == HOST_BITS_PER_WIDE_INT)
5183 mask = -1;
5184 else
5185 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
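/* Bit I of the selector chooses element I from OP0 when set and from OP1
   when clear, so an all-zero selector reduces to OP1 and an all-ones
   selector reduces to OP0, as checked below.  */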
5187 if (!(sel & mask) && !side_effects_p (op0))
5188 return op1;
5189 if ((sel & mask) == mask && !side_effects_p (op1))
5190 return op0;
5192 rtx trueop0 = avoid_constant_pool_reference (op0);
5193 rtx trueop1 = avoid_constant_pool_reference (op1);
5194 if (GET_CODE (trueop0) == CONST_VECTOR
5195 && GET_CODE (trueop1) == CONST_VECTOR)
5197 rtvec v = rtvec_alloc (n_elts);
5198 unsigned int i;
5200 for (i = 0; i < n_elts; i++)
5201 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5202 ? CONST_VECTOR_ELT (trueop0, i)
5203 : CONST_VECTOR_ELT (trueop1, i));
5204 return gen_rtx_CONST_VECTOR (mode, v);
5207 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5208 if no element from a appears in the result. */
5209 if (GET_CODE (op0) == VEC_MERGE)
5211 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5212 if (CONST_INT_P (tem))
5214 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5215 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5216 return simplify_gen_ternary (code, mode, mode,
5217 XEXP (op0, 1), op1, op2);
5218 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5219 return simplify_gen_ternary (code, mode, mode,
5220 XEXP (op0, 0), op1, op2);
5223 if (GET_CODE (op1) == VEC_MERGE)
5225 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5226 if (CONST_INT_P (tem))
5228 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5229 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5230 return simplify_gen_ternary (code, mode, mode,
5231 op0, XEXP (op1, 1), op2);
5232 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5233 return simplify_gen_ternary (code, mode, mode,
5234 op0, XEXP (op1, 0), op2);
5239 if (rtx_equal_p (op0, op1)
5240 && !side_effects_p (op2) && !side_effects_p (op1))
5241 return op0;
5243 break;
5245 default:
5246 gcc_unreachable ();
5249 return 0;
5252 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5253 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5254 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5256 Works by unpacking OP into a collection of 8-bit values
5257 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5258 and then repacking them again for OUTERMODE. */
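/* For example, a subreg:SI of a DImode integer constant at byte 4 is
   handled by spreading the constant over eight byte-sized values, picking
   the four of them addressed by BYTE (after adjusting for endianness), and
   rebuilding a CONST_INT from those bytes.  */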
5260 static rtx
5261 simplify_immed_subreg (machine_mode outermode, rtx op,
5262 machine_mode innermode, unsigned int byte)
5264 enum {
5265 value_bit = 8,
5266 value_mask = (1 << value_bit) - 1
5268 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5269 int value_start;
5270 int i;
5271 int elem;
5273 int num_elem;
5274 rtx * elems;
5275 int elem_bitsize;
5276 rtx result_s;
5277 rtvec result_v = NULL;
5278 enum mode_class outer_class;
5279 machine_mode outer_submode;
5280 int max_bitsize;
5282 /* Some ports misuse CCmode. */
5283 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5284 return op;
5286 /* We have no way to represent a complex constant at the rtl level. */
5287 if (COMPLEX_MODE_P (outermode))
5288 return NULL_RTX;
5290 /* We support any size mode. */
5291 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5292 GET_MODE_BITSIZE (innermode));
5294 /* Unpack the value. */
5296 if (GET_CODE (op) == CONST_VECTOR)
5298 num_elem = CONST_VECTOR_NUNITS (op);
5299 elems = &CONST_VECTOR_ELT (op, 0);
5300 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5302 else
5304 num_elem = 1;
5305 elems = &op;
5306 elem_bitsize = max_bitsize;
5308 /* If this asserts, it is too complicated; reducing value_bit may help. */
5309 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5310 /* I don't know how to handle endianness of sub-units. */
5311 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5313 for (elem = 0; elem < num_elem; elem++)
5315 unsigned char * vp;
5316 rtx el = elems[elem];
5318 /* Vectors are kept in target memory order. (This is probably
5319 a mistake.) */
5321 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5322 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5323 / BITS_PER_UNIT);
5324 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5325 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5326 unsigned bytele = (subword_byte % UNITS_PER_WORD
5327 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5328 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5331 switch (GET_CODE (el))
5333 case CONST_INT:
5334 for (i = 0;
5335 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5336 i += value_bit)
5337 *vp++ = INTVAL (el) >> i;
5338 /* CONST_INTs are always logically sign-extended. */
5339 for (; i < elem_bitsize; i += value_bit)
5340 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5341 break;
5343 case CONST_WIDE_INT:
5345 rtx_mode_t val = std::make_pair (el, innermode);
5346 unsigned char extend = wi::sign_mask (val);
5348 for (i = 0; i < elem_bitsize; i += value_bit)
5349 *vp++ = wi::extract_uhwi (val, i, value_bit);
5350 for (; i < elem_bitsize; i += value_bit)
5351 *vp++ = extend;
5353 break;
5355 case CONST_DOUBLE:
5356 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5358 unsigned char extend = 0;
5359 /* If this triggers, someone should have generated a
5360 CONST_INT instead. */
5361 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5363 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5364 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5365 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5367 *vp++
5368 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5369 i += value_bit;
5372 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5373 extend = -1;
5374 for (; i < elem_bitsize; i += value_bit)
5375 *vp++ = extend;
5377 else
5379 /* This is big enough for anything on the platform. */
5380 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5381 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5383 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5384 gcc_assert (bitsize <= elem_bitsize);
5385 gcc_assert (bitsize % value_bit == 0);
5387 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5388 GET_MODE (el));
5390 /* real_to_target produces its result in words affected by
5391 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5392 and use WORDS_BIG_ENDIAN instead; see the documentation
5393 of SUBREG in rtl.texi. */
5394 for (i = 0; i < bitsize; i += value_bit)
5396 int ibase;
5397 if (WORDS_BIG_ENDIAN)
5398 ibase = bitsize - 1 - i;
5399 else
5400 ibase = i;
5401 *vp++ = tmp[ibase / 32] >> i % 32;
5404 /* It shouldn't matter what's done here, so fill it with
5405 zero. */
5406 for (; i < elem_bitsize; i += value_bit)
5407 *vp++ = 0;
5409 break;
5411 case CONST_FIXED:
5412 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5414 for (i = 0; i < elem_bitsize; i += value_bit)
5415 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5417 else
5419 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5420 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5421 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5422 i += value_bit)
5423 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5424 >> (i - HOST_BITS_PER_WIDE_INT);
5425 for (; i < elem_bitsize; i += value_bit)
5426 *vp++ = 0;
5428 break;
5430 default:
5431 gcc_unreachable ();
5435 /* Now, pick the right byte to start with. */
5436 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5437 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5438 will already have offset 0. */
5439 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5441 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5442 - byte);
5443 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5444 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5445 byte = (subword_byte % UNITS_PER_WORD
5446 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5449 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5450 so if it's become negative it will instead be very large.) */
5451 gcc_assert (byte < GET_MODE_SIZE (innermode));
5453 /* Convert from bytes to chunks of size value_bit. */
5454 value_start = byte * (BITS_PER_UNIT / value_bit);
5456 /* Re-pack the value. */
5458 if (VECTOR_MODE_P (outermode))
5460 num_elem = GET_MODE_NUNITS (outermode);
5461 result_v = rtvec_alloc (num_elem);
5462 elems = &RTVEC_ELT (result_v, 0);
5463 outer_submode = GET_MODE_INNER (outermode);
5465 else
5467 num_elem = 1;
5468 elems = &result_s;
5469 outer_submode = outermode;
5472 outer_class = GET_MODE_CLASS (outer_submode);
5473 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5475 gcc_assert (elem_bitsize % value_bit == 0);
5476 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5478 for (elem = 0; elem < num_elem; elem++)
5480 unsigned char *vp;
5482 /* Vectors are stored in target memory order. (This is probably
5483 a mistake.) */
5485 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5486 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5487 / BITS_PER_UNIT);
5488 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5489 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5490 unsigned bytele = (subword_byte % UNITS_PER_WORD
5491 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5492 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5495 switch (outer_class)
5497 case MODE_INT:
5498 case MODE_PARTIAL_INT:
5500 int u;
5501 int base = 0;
5502 int units
5503 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5504 / HOST_BITS_PER_WIDE_INT;
5505 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5506 wide_int r;
5508 for (u = 0; u < units; u++)
5510 unsigned HOST_WIDE_INT buf = 0;
5511 for (i = 0;
5512 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5513 i += value_bit)
5514 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5516 tmp[u] = buf;
5517 base += HOST_BITS_PER_WIDE_INT;
5519 gcc_assert (GET_MODE_PRECISION (outer_submode)
5520 <= MAX_BITSIZE_MODE_ANY_INT);
5521 r = wide_int::from_array (tmp, units,
5522 GET_MODE_PRECISION (outer_submode));
5523 elems[elem] = immed_wide_int_const (r, outer_submode);
5525 break;
5527 case MODE_FLOAT:
5528 case MODE_DECIMAL_FLOAT:
5530 REAL_VALUE_TYPE r;
5531 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5533 /* real_from_target wants its input in words affected by
5534 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5535 and use WORDS_BIG_ENDIAN instead; see the documentation
5536 of SUBREG in rtl.texi. */
5537 for (i = 0; i < max_bitsize / 32; i++)
5538 tmp[i] = 0;
5539 for (i = 0; i < elem_bitsize; i += value_bit)
5541 int ibase;
5542 if (WORDS_BIG_ENDIAN)
5543 ibase = elem_bitsize - 1 - i;
5544 else
5545 ibase = i;
5546 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5549 real_from_target (&r, tmp, outer_submode);
5550 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5552 break;
5554 case MODE_FRACT:
5555 case MODE_UFRACT:
5556 case MODE_ACCUM:
5557 case MODE_UACCUM:
5559 FIXED_VALUE_TYPE f;
5560 f.data.low = 0;
5561 f.data.high = 0;
5562 f.mode = outer_submode;
5564 for (i = 0;
5565 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5566 i += value_bit)
5567 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5568 for (; i < elem_bitsize; i += value_bit)
5569 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5570 << (i - HOST_BITS_PER_WIDE_INT));
5572 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5574 break;
5576 default:
5577 gcc_unreachable ();
5580 if (VECTOR_MODE_P (outermode))
5581 return gen_rtx_CONST_VECTOR (outermode, result_v);
5582 else
5583 return result_s;
5586 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5587 Return 0 if no simplifications are possible. */
5589 simplify_subreg (machine_mode outermode, rtx op,
5590 machine_mode innermode, unsigned int byte)
5592 /* Little bit of sanity checking. */
5593 gcc_assert (innermode != VOIDmode);
5594 gcc_assert (outermode != VOIDmode);
5595 gcc_assert (innermode != BLKmode);
5596 gcc_assert (outermode != BLKmode);
5598 gcc_assert (GET_MODE (op) == innermode
5599 || GET_MODE (op) == VOIDmode);
5601 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5602 return NULL_RTX;
5604 if (byte >= GET_MODE_SIZE (innermode))
5605 return NULL_RTX;
5607 if (outermode == innermode && !byte)
5608 return op;
5610 if (CONST_SCALAR_INT_P (op)
5611 || CONST_DOUBLE_AS_FLOAT_P (op)
5612 || GET_CODE (op) == CONST_FIXED
5613 || GET_CODE (op) == CONST_VECTOR)
5614 return simplify_immed_subreg (outermode, op, innermode, byte);
5616 /* Changing mode twice with SUBREG => just change it once,
5617 or not at all if changing back to op's starting mode. */
5618 if (GET_CODE (op) == SUBREG)
5620 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5621 int final_offset = byte + SUBREG_BYTE (op);
5622 rtx newx;
5624 if (outermode == innermostmode
5625 && byte == 0 && SUBREG_BYTE (op) == 0)
5626 return SUBREG_REG (op);
5628 /* The SUBREG_BYTE represents the offset, as if the value were stored
5629 in memory. An irritating exception is the paradoxical subreg, where
5630 we define SUBREG_BYTE to be 0. On big endian machines, this
5631 value should be negative. For a moment, undo this exception. */
5632 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5634 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5635 if (WORDS_BIG_ENDIAN)
5636 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5637 if (BYTES_BIG_ENDIAN)
5638 final_offset += difference % UNITS_PER_WORD;
5640 if (SUBREG_BYTE (op) == 0
5641 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5643 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5644 if (WORDS_BIG_ENDIAN)
5645 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5646 if (BYTES_BIG_ENDIAN)
5647 final_offset += difference % UNITS_PER_WORD;
5650 /* See whether resulting subreg will be paradoxical. */
5651 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5653 /* In nonparadoxical subregs we can't handle negative offsets. */
5654 if (final_offset < 0)
5655 return NULL_RTX;
5656 /* Bail out in case resulting subreg would be incorrect. */
5657 if (final_offset % GET_MODE_SIZE (outermode)
5658 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5659 return NULL_RTX;
5661 else
5663 int offset = 0;
5664 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5666 /* In a paradoxical subreg, see if we are still looking at the lower part.
5667 If so, our SUBREG_BYTE will be 0. */
5668 if (WORDS_BIG_ENDIAN)
5669 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5670 if (BYTES_BIG_ENDIAN)
5671 offset += difference % UNITS_PER_WORD;
5672 if (offset == final_offset)
5673 final_offset = 0;
5674 else
5675 return NULL_RTX;
5678 /* Recurse for further possible simplifications. */
5679 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5680 final_offset);
5681 if (newx)
5682 return newx;
5683 if (validate_subreg (outermode, innermostmode,
5684 SUBREG_REG (op), final_offset))
5686 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5687 if (SUBREG_PROMOTED_VAR_P (op)
5688 && SUBREG_PROMOTED_SIGN (op) >= 0
5689 && GET_MODE_CLASS (outermode) == MODE_INT
5690 && IN_RANGE (GET_MODE_SIZE (outermode),
5691 GET_MODE_SIZE (innermode),
5692 GET_MODE_SIZE (innermostmode))
5693 && subreg_lowpart_p (newx))
5695 SUBREG_PROMOTED_VAR_P (newx) = 1;
5696 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5698 return newx;
5700 return NULL_RTX;
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
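  /* Illustrative example (x86-64 assumed): (subreg:SI (reg:DI ax) 0)
     can simply become (reg:SI ax), since ax is valid in SImode.  */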
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             an offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok a partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
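  /* Roughly (an illustration; the register is arbitrary):
     (subreg:SI (mem:DI (reg:DI 60)) 4) becomes a narrower memory reference
     at the adjusted address, (mem:SI (plus:DI (reg:DI 60) (const_int 4))).  */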
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as a CONCAT
     of a real and an imaginary part.  */
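  /* Illustration (assuming a 4-byte SFmode):
     (subreg:SF (concat:SC (reg:SF 60) (reg:SF 61)) 4) picks out the second
     (imaginary) operand, i.e. (reg:SF 61).  */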
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source provides.  */
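  /* E.g. on a little-endian target (illustration only):
     (subreg:SI (zero_extend:DI (reg:SI 60)) 4) reads only the zeroed upper
     half, so it folds to (const_int 0).  */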
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
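  /* A lowpart SUBREG of a wider scalar integer acts like a truncation,
     so reuse the TRUNCATE simplifications in simplify_truncation.  */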
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or an equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
                     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
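/* Usage sketch (illustrative only; the operand names are assumed): a backend
   splitting a DImode move might call
     simplify_gen_subreg (SImode, operands[1], DImode, 4)
   to refer to the word at byte offset 4 of operands[1].  The result is the
   folded expression when one exists, otherwise a fresh SUBREG, or NULL_RTX
   if no valid SUBREG can be formed.  */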
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
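      /* Canonicalization example (illustrative operands): the swap turns
         (plus:SI (const_int 4) (reg:SI 60)) into
         (plus:SI (reg:SI 60) (const_int 4)), putting the constant last.  */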
      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
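      /* One comparison operand may be a VOIDmode constant, e.g.
         (eq (reg:SI 60) (const_int 0)), so take the comparison mode from
         whichever operand carries a real mode.  */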
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));
    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;