[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "hashtab.h"
35 #include "hash-set.h"
36 #include "vec.h"
37 #include "machmode.h"
38 #include "input.h"
39 #include "function.h"
40 #include "insn-codes.h"
41 #include "optabs.h"
42 #include "expr.h"
43 #include "diagnostic-core.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "predict.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
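/* For example, HWI_SIGN_EXTEND (5) is (HOST_WIDE_INT) 0 and
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is (HOST_WIDE_INT) -1; the macro
   yields the high half that sign-extending LOW would produce.  */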
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
60 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
61 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
62 unsigned int);
63 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
64 rtx, rtx);
65 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
66 machine_mode, rtx, rtx);
67 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
68 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
69 rtx, rtx, rtx, rtx);
71 /* Negate a CONST_INT rtx, truncating (because a conversion from a
72 maximally negative number can overflow). */
73 static rtx
74 neg_const_int (machine_mode mode, const_rtx i)
76 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
 79 /* Test whether the expression X is an immediate constant that represents
80 the most significant bit of machine mode MODE. */
82 bool
83 mode_signbit_p (machine_mode mode, const_rtx x)
85 unsigned HOST_WIDE_INT val;
86 unsigned int width;
88 if (GET_MODE_CLASS (mode) != MODE_INT)
89 return false;
91 width = GET_MODE_PRECISION (mode);
92 if (width == 0)
93 return false;
95 if (width <= HOST_BITS_PER_WIDE_INT
96 && CONST_INT_P (x))
97 val = INTVAL (x);
98 #if TARGET_SUPPORTS_WIDE_INT
99 else if (CONST_WIDE_INT_P (x))
101 unsigned int i;
102 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
103 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
104 return false;
105 for (i = 0; i < elts - 1; i++)
106 if (CONST_WIDE_INT_ELT (x, i) != 0)
107 return false;
108 val = CONST_WIDE_INT_ELT (x, elts - 1);
109 width %= HOST_BITS_PER_WIDE_INT;
110 if (width == 0)
111 width = HOST_BITS_PER_WIDE_INT;
113 #else
114 else if (width <= HOST_BITS_PER_DOUBLE_INT
115 && CONST_DOUBLE_AS_INT_P (x)
116 && CONST_DOUBLE_LOW (x) == 0)
118 val = CONST_DOUBLE_HIGH (x);
119 width -= HOST_BITS_PER_WIDE_INT;
121 #endif
122 else
123 /* X is not an integer constant. */
124 return false;
126 if (width < HOST_BITS_PER_WIDE_INT)
127 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
128 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
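/* E.g. for a 32-bit SImode, mode_signbit_p holds only for a constant whose
   SImode value is 0x80000000; it is false for 0x40000000 or 0xc0000000
   (illustrative values).  */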
131 /* Test whether VAL is equal to the most significant bit of mode MODE
132 (after masking with the mode mask of MODE). Returns false if the
133 precision of MODE is too large to handle. */
135 bool
136 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
138 unsigned int width;
140 if (GET_MODE_CLASS (mode) != MODE_INT)
141 return false;
143 width = GET_MODE_PRECISION (mode);
144 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
145 return false;
147 val &= GET_MODE_MASK (mode);
148 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
151 /* Test whether the most significant bit of mode MODE is set in VAL.
152 Returns false if the precision of MODE is too large to handle. */
153 bool
154 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
156 unsigned int width;
158 if (GET_MODE_CLASS (mode) != MODE_INT)
159 return false;
161 width = GET_MODE_PRECISION (mode);
162 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
163 return false;
165 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
166 return val != 0;
169 /* Test whether the most significant bit of mode MODE is clear in VAL.
170 Returns false if the precision of MODE is too large to handle. */
171 bool
172 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
174 unsigned int width;
176 if (GET_MODE_CLASS (mode) != MODE_INT)
177 return false;
179 width = GET_MODE_PRECISION (mode);
180 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
181 return false;
183 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
184 return val == 0;
187 /* Make a binary operation by properly ordering the operands and
188 seeing if the expression folds. */
191 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
192 rtx op1)
194 rtx tem;
196 /* If this simplifies, do it. */
197 tem = simplify_binary_operation (code, mode, op0, op1);
198 if (tem)
199 return tem;
201 /* Put complex operands first and constants second if commutative. */
202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
203 && swap_commutative_operands_p (op0, op1))
204 tem = op0, op0 = op1, op1 = tem;
206 return gen_rtx_fmt_ee (code, mode, op0, op1);
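/* A sketch of the canonicalization above, with hypothetical operands:
   simplify_gen_binary (PLUS, SImode, (const_int 4), (reg:SI 1)) returns
   (plus:SI (reg:SI 1) (const_int 4)), the constant placed second because
   PLUS is commutative.  */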
209 /* If X is a MEM referencing the constant pool, return the real value.
210 Otherwise return X. */
212 avoid_constant_pool_reference (rtx x)
214 rtx c, tmp, addr;
215 machine_mode cmode;
216 HOST_WIDE_INT offset = 0;
218 switch (GET_CODE (x))
220 case MEM:
221 break;
223 case FLOAT_EXTEND:
224 /* Handle float extensions of constant pool references. */
225 tmp = XEXP (x, 0);
226 c = avoid_constant_pool_reference (tmp);
227 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
229 REAL_VALUE_TYPE d;
231 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
232 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
234 return x;
236 default:
237 return x;
240 if (GET_MODE (x) == BLKmode)
241 return x;
243 addr = XEXP (x, 0);
245 /* Call target hook to avoid the effects of -fpic etc.... */
246 addr = targetm.delegitimize_address (addr);
248 /* Split the address into a base and integer offset. */
249 if (GET_CODE (addr) == CONST
250 && GET_CODE (XEXP (addr, 0)) == PLUS
251 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
253 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
254 addr = XEXP (XEXP (addr, 0), 0);
257 if (GET_CODE (addr) == LO_SUM)
258 addr = XEXP (addr, 1);
260 /* If this is a constant pool reference, we can turn it into its
261 constant and hope that simplifications happen. */
262 if (GET_CODE (addr) == SYMBOL_REF
263 && CONSTANT_POOL_ADDRESS_P (addr))
265 c = get_pool_constant (addr);
266 cmode = get_pool_mode (addr);
268 /* If we're accessing the constant in a different mode than it was
269 originally stored, attempt to fix that up via subreg simplifications.
270 If that fails we have no choice but to return the original memory. */
271 if ((offset != 0 || cmode != GET_MODE (x))
272 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
274 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
275 if (tem && CONSTANT_P (tem))
276 return tem;
278 else
279 return c;
282 return x;
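/* E.g. if X is (mem:SF (symbol_ref ...)) whose address names a constant
   pool entry holding the SFmode constant 1.0, the pooled
   (const_double ... 1.0) is returned in place of the MEM; when the entry
   cannot be resolved or reinterpreted, X itself is returned unchanged.
   (Hypothetical operands for illustration.)  */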
285 /* Simplify a MEM based on its attributes. This is the default
286 delegitimize_address target hook, and it's recommended that every
287 overrider call it. */
290 delegitimize_mem_from_attrs (rtx x)
292 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
293 use their base addresses as equivalent. */
294 if (MEM_P (x)
295 && MEM_EXPR (x)
296 && MEM_OFFSET_KNOWN_P (x))
298 tree decl = MEM_EXPR (x);
299 machine_mode mode = GET_MODE (x);
300 HOST_WIDE_INT offset = 0;
302 switch (TREE_CODE (decl))
304 default:
305 decl = NULL;
306 break;
308 case VAR_DECL:
309 break;
311 case ARRAY_REF:
312 case ARRAY_RANGE_REF:
313 case COMPONENT_REF:
314 case BIT_FIELD_REF:
315 case REALPART_EXPR:
316 case IMAGPART_EXPR:
317 case VIEW_CONVERT_EXPR:
319 HOST_WIDE_INT bitsize, bitpos;
320 tree toffset;
321 int unsignedp, volatilep = 0;
323 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
324 &mode, &unsignedp, &volatilep, false);
325 if (bitsize != GET_MODE_BITSIZE (mode)
326 || (bitpos % BITS_PER_UNIT)
327 || (toffset && !tree_fits_shwi_p (toffset)))
328 decl = NULL;
329 else
331 offset += bitpos / BITS_PER_UNIT;
332 if (toffset)
333 offset += tree_to_shwi (toffset);
335 break;
339 if (decl
340 && mode == GET_MODE (x)
341 && TREE_CODE (decl) == VAR_DECL
342 && (TREE_STATIC (decl)
343 || DECL_THREAD_LOCAL_P (decl))
344 && DECL_RTL_SET_P (decl)
345 && MEM_P (DECL_RTL (decl)))
347 rtx newx;
349 offset += MEM_OFFSET (x);
351 newx = DECL_RTL (decl);
353 if (MEM_P (newx))
355 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
357 /* Avoid creating a new MEM needlessly if we already had
 358 the same address. We do this if there's no OFFSET and the
359 old address X is identical to NEWX, or if X is of the
 360 form (plus NEWX OFFSET), or NEWX is of the form
361 (plus Y (const_int Z)) and X is that with the offset
362 added: (plus Y (const_int Z+OFFSET)). */
363 if (!((offset == 0
364 || (GET_CODE (o) == PLUS
365 && GET_CODE (XEXP (o, 1)) == CONST_INT
366 && (offset == INTVAL (XEXP (o, 1))
367 || (GET_CODE (n) == PLUS
368 && GET_CODE (XEXP (n, 1)) == CONST_INT
369 && (INTVAL (XEXP (n, 1)) + offset
370 == INTVAL (XEXP (o, 1)))
371 && (n = XEXP (n, 0))))
372 && (o = XEXP (o, 0))))
373 && rtx_equal_p (o, n)))
374 x = adjust_address_nv (newx, mode, offset);
376 else if (GET_MODE (x) == GET_MODE (newx)
377 && offset == 0)
378 x = newx;
382 return x;
385 /* Make a unary operation by first seeing if it folds and otherwise making
386 the specified operation. */
389 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
390 machine_mode op_mode)
392 rtx tem;
394 /* If this simplifies, use it. */
395 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
396 return tem;
398 return gen_rtx_fmt_e (code, mode, op);
401 /* Likewise for ternary operations. */
404 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
405 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
407 rtx tem;
409 /* If this simplifies, use it. */
410 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
411 op0, op1, op2)))
412 return tem;
414 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
417 /* Likewise, for relational operations.
418 CMP_MODE specifies mode comparison is done in. */
421 simplify_gen_relational (enum rtx_code code, machine_mode mode,
422 machine_mode cmp_mode, rtx op0, rtx op1)
424 rtx tem;
426 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
427 op0, op1)))
428 return tem;
430 return gen_rtx_fmt_ee (code, mode, op0, op1);
433 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
434 and simplify the result. If FN is non-NULL, call this callback on each
 435 X; if it returns non-NULL, replace X with its return value and simplify the
436 result. */
439 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
440 rtx (*fn) (rtx, const_rtx, void *), void *data)
442 enum rtx_code code = GET_CODE (x);
443 machine_mode mode = GET_MODE (x);
444 machine_mode op_mode;
445 const char *fmt;
446 rtx op0, op1, op2, newx, op;
447 rtvec vec, newvec;
448 int i, j;
450 if (__builtin_expect (fn != NULL, 0))
452 newx = fn (x, old_rtx, data);
453 if (newx)
454 return newx;
456 else if (rtx_equal_p (x, old_rtx))
457 return copy_rtx ((rtx) data);
459 switch (GET_RTX_CLASS (code))
461 case RTX_UNARY:
462 op0 = XEXP (x, 0);
463 op_mode = GET_MODE (op0);
464 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
465 if (op0 == XEXP (x, 0))
466 return x;
467 return simplify_gen_unary (code, mode, op0, op_mode);
469 case RTX_BIN_ARITH:
470 case RTX_COMM_ARITH:
471 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_binary (code, mode, op0, op1);
477 case RTX_COMPARE:
478 case RTX_COMM_COMPARE:
479 op0 = XEXP (x, 0);
480 op1 = XEXP (x, 1);
481 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
482 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
483 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
485 return x;
486 return simplify_gen_relational (code, mode, op_mode, op0, op1);
488 case RTX_TERNARY:
489 case RTX_BITFIELD_OPS:
490 op0 = XEXP (x, 0);
491 op_mode = GET_MODE (op0);
492 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
493 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
494 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
495 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
496 return x;
497 if (op_mode == VOIDmode)
498 op_mode = GET_MODE (op0);
499 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
501 case RTX_EXTRA:
502 if (code == SUBREG)
504 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
505 if (op0 == SUBREG_REG (x))
506 return x;
507 op0 = simplify_gen_subreg (GET_MODE (x), op0,
508 GET_MODE (SUBREG_REG (x)),
509 SUBREG_BYTE (x));
510 return op0 ? op0 : x;
512 break;
514 case RTX_OBJ:
515 if (code == MEM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 if (op0 == XEXP (x, 0))
519 return x;
520 return replace_equiv_address_nv (x, op0);
522 else if (code == LO_SUM)
524 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
525 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
527 /* (lo_sum (high x) x) -> x */
528 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
529 return op1;
531 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
532 return x;
533 return gen_rtx_LO_SUM (mode, op0, op1);
535 break;
537 default:
538 break;
541 newx = x;
542 fmt = GET_RTX_FORMAT (code);
543 for (i = 0; fmt[i]; i++)
544 switch (fmt[i])
546 case 'E':
547 vec = XVEC (x, i);
548 newvec = XVEC (newx, i);
549 for (j = 0; j < GET_NUM_ELEM (vec); j++)
551 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
552 old_rtx, fn, data);
553 if (op != RTVEC_ELT (vec, j))
555 if (newvec == vec)
557 newvec = shallow_copy_rtvec (vec);
558 if (x == newx)
559 newx = shallow_copy_rtx (x);
560 XVEC (newx, i) = newvec;
562 RTVEC_ELT (newvec, j) = op;
565 break;
567 case 'e':
568 if (XEXP (x, i))
570 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
571 if (op != XEXP (x, i))
573 if (x == newx)
574 newx = shallow_copy_rtx (x);
575 XEXP (newx, i) = op;
578 break;
580 return newx;
583 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
584 resulting RTX. Return a new RTX which is as simplified as possible. */
587 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
589 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
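/* A sketch of typical use, with hypothetical operands:

     simplify_replace_rtx ((plus:SI (reg:SI 100) (const_int 4)),
                           (reg:SI 100), (const_int 6))

   substitutes (const_int 6) for (reg:SI 100) and folds the result,
   yielding (const_int 10).  */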
592 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
593 Only handle cases where the truncated value is inherently an rvalue.
595 RTL provides two ways of truncating a value:
597 1. a lowpart subreg. This form is only a truncation when both
598 the outer and inner modes (here MODE and OP_MODE respectively)
599 are scalar integers, and only then when the subreg is used as
600 an rvalue.
602 It is only valid to form such truncating subregs if the
603 truncation requires no action by the target. The onus for
604 proving this is on the creator of the subreg -- e.g. the
605 caller to simplify_subreg or simplify_gen_subreg -- and typically
606 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
608 2. a TRUNCATE. This form handles both scalar and compound integers.
610 The first form is preferred where valid. However, the TRUNCATE
611 handling in simplify_unary_operation turns the second form into the
612 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
613 so it is generally safe to form rvalue truncations using:
615 simplify_gen_unary (TRUNCATE, ...)
617 and leave simplify_unary_operation to work out which representation
618 should be used.
620 Because of the proof requirements on (1), simplify_truncation must
621 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
622 regardless of whether the outer truncation came from a SUBREG or a
623 TRUNCATE. For example, if the caller has proven that an SImode
624 truncation of:
626 (and:DI X Y)
628 is a no-op and can be represented as a subreg, it does not follow
629 that SImode truncations of X and Y are also no-ops. On a target
630 like 64-bit MIPS that requires SImode values to be stored in
631 sign-extended form, an SImode truncation of:
633 (and:DI (reg:DI X) (const_int 63))
635 is trivially a no-op because only the lower 6 bits can be set.
636 However, X is still an arbitrary 64-bit number and so we cannot
637 assume that truncating it too is a no-op. */
639 static rtx
640 simplify_truncation (machine_mode mode, rtx op,
641 machine_mode op_mode)
643 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
644 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
645 gcc_assert (precision <= op_precision);
647 /* Optimize truncations of zero and sign extended values. */
648 if (GET_CODE (op) == ZERO_EXTEND
649 || GET_CODE (op) == SIGN_EXTEND)
651 /* There are three possibilities. If MODE is the same as the
652 origmode, we can omit both the extension and the subreg.
653 If MODE is not larger than the origmode, we can apply the
654 truncation without the extension. Finally, if the outermode
655 is larger than the origmode, we can just extend to the appropriate
656 mode. */
657 machine_mode origmode = GET_MODE (XEXP (op, 0));
658 if (mode == origmode)
659 return XEXP (op, 0);
660 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
661 return simplify_gen_unary (TRUNCATE, mode,
662 XEXP (op, 0), origmode);
663 else
664 return simplify_gen_unary (GET_CODE (op), mode,
665 XEXP (op, 0), origmode);
668 /* If the machine can perform operations in the truncated mode, distribute
669 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
670 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
671 if (1
672 #ifdef WORD_REGISTER_OPERATIONS
673 && precision >= BITS_PER_WORD
674 #endif
675 && (GET_CODE (op) == PLUS
676 || GET_CODE (op) == MINUS
677 || GET_CODE (op) == MULT))
679 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
680 if (op0)
682 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
683 if (op1)
684 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
688 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 689 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op) == LSHIFTRT
692 || GET_CODE (op) == ASHIFTRT)
693 /* Ensure that OP_MODE is at least twice as wide as MODE
694 to avoid the possibility that an outer LSHIFTRT shifts by more
695 than the sign extension's sign_bit_copies and introduces zeros
696 into the high bits of the result. */
697 && 2 * precision <= op_precision
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (ASHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 706 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if ((GET_CODE (op) == LSHIFTRT
709 || GET_CODE (op) == ASHIFTRT)
710 && CONST_INT_P (XEXP (op, 1))
711 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (LSHIFTRT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 718 (ashift:QI (x:QI) C), where C is a suitable small constant and
719 the outer subreg is effectively a truncation to the original mode. */
720 if (GET_CODE (op) == ASHIFT
721 && CONST_INT_P (XEXP (op, 1))
722 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
723 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
725 && UINTVAL (XEXP (op, 1)) < precision)
726 return simplify_gen_binary (ASHIFT, mode,
727 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
729 /* Recognize a word extraction from a multi-word subreg. */
730 if ((GET_CODE (op) == LSHIFTRT
731 || GET_CODE (op) == ASHIFTRT)
732 && SCALAR_INT_MODE_P (mode)
733 && SCALAR_INT_MODE_P (op_mode)
734 && precision >= BITS_PER_WORD
735 && 2 * precision <= op_precision
736 && CONST_INT_P (XEXP (op, 1))
737 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
738 && UINTVAL (XEXP (op, 1)) < op_precision)
740 int byte = subreg_lowpart_offset (mode, op_mode);
741 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
742 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
743 (WORDS_BIG_ENDIAN
744 ? byte - shifted_bytes
745 : byte + shifted_bytes));
748 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
749 and try replacing the TRUNCATE and shift with it. Don't do this
750 if the MEM has a mode-dependent address. */
751 if ((GET_CODE (op) == LSHIFTRT
752 || GET_CODE (op) == ASHIFTRT)
753 && SCALAR_INT_MODE_P (op_mode)
754 && MEM_P (XEXP (op, 0))
755 && CONST_INT_P (XEXP (op, 1))
756 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
757 && INTVAL (XEXP (op, 1)) > 0
758 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
759 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
760 MEM_ADDR_SPACE (XEXP (op, 0)))
761 && ! MEM_VOLATILE_P (XEXP (op, 0))
762 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
763 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
765 int byte = subreg_lowpart_offset (mode, op_mode);
766 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
767 return adjust_address_nv (XEXP (op, 0), mode,
768 (WORDS_BIG_ENDIAN
769 ? byte - shifted_bytes
770 : byte + shifted_bytes));
773 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
774 (OP:SI foo:SI) if OP is NEG or ABS. */
775 if ((GET_CODE (op) == ABS
776 || GET_CODE (op) == NEG)
777 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
778 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
779 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
780 return simplify_gen_unary (GET_CODE (op), mode,
781 XEXP (XEXP (op, 0), 0), mode);
783 /* (truncate:A (subreg:B (truncate:C X) 0)) is
784 (truncate:A X). */
785 if (GET_CODE (op) == SUBREG
786 && SCALAR_INT_MODE_P (mode)
787 && SCALAR_INT_MODE_P (op_mode)
788 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
789 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
790 && subreg_lowpart_p (op))
792 rtx inner = XEXP (SUBREG_REG (op), 0);
793 if (GET_MODE_PRECISION (mode)
794 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
795 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
796 else
797 /* If subreg above is paradoxical and C is narrower
798 than A, return (subreg:A (truncate:C X) 0). */
799 return simplify_gen_subreg (mode, SUBREG_REG (op),
800 GET_MODE (SUBREG_REG (op)), 0);
803 /* (truncate:A (truncate:B X)) is (truncate:A X). */
804 if (GET_CODE (op) == TRUNCATE)
805 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
806 GET_MODE (XEXP (op, 0)));
808 return NULL_RTX;
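/* Two of the cases handled above, with illustrative operands: a truncation
   back to the mode the value was extended from disappears,

     (truncate:SI (sign_extend:DI (reg:SI X))) -> (reg:SI X)

   and, when the target can operate in the narrower mode, the truncation is
   distributed over the arithmetic,

     (truncate:QI (plus:SI (reg:SI A) (reg:SI B)))
       -> (plus:QI (truncate:QI (reg:SI A)) (truncate:QI (reg:SI B))).  */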
811 /* Try to simplify a unary operation CODE whose output mode is to be
812 MODE with input operand OP whose mode was originally OP_MODE.
813 Return zero if no simplification can be made. */
815 simplify_unary_operation (enum rtx_code code, machine_mode mode,
816 rtx op, machine_mode op_mode)
818 rtx trueop, tem;
820 trueop = avoid_constant_pool_reference (op);
822 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
823 if (tem)
824 return tem;
826 return simplify_unary_operation_1 (code, mode, op);
829 /* Perform some simplifications we can do even if the operands
830 aren't constant. */
831 static rtx
832 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
834 enum rtx_code reversed;
835 rtx temp;
837 switch (code)
839 case NOT:
840 /* (not (not X)) == X. */
841 if (GET_CODE (op) == NOT)
842 return XEXP (op, 0);
844 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
845 comparison is all ones. */
846 if (COMPARISON_P (op)
847 && (mode == BImode || STORE_FLAG_VALUE == -1)
848 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
849 return simplify_gen_relational (reversed, mode, VOIDmode,
850 XEXP (op, 0), XEXP (op, 1));
852 /* (not (plus X -1)) can become (neg X). */
853 if (GET_CODE (op) == PLUS
854 && XEXP (op, 1) == constm1_rtx)
855 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
857 /* Similarly, (not (neg X)) is (plus X -1). */
858 if (GET_CODE (op) == NEG)
859 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
860 CONSTM1_RTX (mode));
862 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
863 if (GET_CODE (op) == XOR
864 && CONST_INT_P (XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
869 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
870 if (GET_CODE (op) == PLUS
871 && CONST_INT_P (XEXP (op, 1))
872 && mode_signbit_p (mode, XEXP (op, 1))
873 && (temp = simplify_unary_operation (NOT, mode,
874 XEXP (op, 1), mode)) != 0)
875 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
878 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
879 operands other than 1, but that is not valid. We could do a
880 similar simplification for (not (lshiftrt C X)) where C is
881 just the sign bit, but this doesn't seem common enough to
882 bother with. */
883 if (GET_CODE (op) == ASHIFT
884 && XEXP (op, 0) == const1_rtx)
886 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
887 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
890 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
891 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
892 so we can perform the above simplification. */
893 if (STORE_FLAG_VALUE == -1
894 && GET_CODE (op) == ASHIFTRT
895 && CONST_INT_P (XEXP (op, 1))
896 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
897 return simplify_gen_relational (GE, mode, VOIDmode,
898 XEXP (op, 0), const0_rtx);
901 if (GET_CODE (op) == SUBREG
902 && subreg_lowpart_p (op)
903 && (GET_MODE_SIZE (GET_MODE (op))
904 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
905 && GET_CODE (SUBREG_REG (op)) == ASHIFT
906 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
908 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
909 rtx x;
911 x = gen_rtx_ROTATE (inner_mode,
912 simplify_gen_unary (NOT, inner_mode, const1_rtx,
913 inner_mode),
914 XEXP (SUBREG_REG (op), 1));
915 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
916 if (temp)
917 return temp;
920 /* Apply De Morgan's laws to reduce number of patterns for machines
921 with negating logical insns (and-not, nand, etc.). If result has
922 only one NOT, put it first, since that is how the patterns are
923 coded. */
924 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
926 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
927 machine_mode op_mode;
929 op_mode = GET_MODE (in1);
930 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
932 op_mode = GET_MODE (in2);
933 if (op_mode == VOIDmode)
934 op_mode = mode;
935 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
937 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
939 rtx tem = in2;
940 in2 = in1; in1 = tem;
943 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
944 mode, in1, in2);
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op) == BSWAP)
950 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
951 return simplify_gen_unary (BSWAP, mode, x, mode);
953 break;
955 case NEG:
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op) == NEG)
958 return XEXP (op, 0);
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op) == PLUS
962 && XEXP (op, 1) == const1_rtx)
963 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op) == NOT)
967 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
968 CONST1_RTX (mode));
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
980 if (GET_CODE (op) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (CONST_SCALAR_INT_P (XEXP (op, 1))
986 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
988 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
989 if (temp)
990 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
993 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
994 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
995 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
998 /* (neg (mult A B)) becomes (mult A (neg B)).
999 This works even for floating-point values. */
1000 if (GET_CODE (op) == MULT
1001 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1003 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1004 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1007 /* NEG commutes with ASHIFT since it is multiplication. Only do
1008 this if we can then eliminate the NEG (e.g., if the operand
1009 is a constant). */
1010 if (GET_CODE (op) == ASHIFT)
1012 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && CONST_INT_P (XEXP (op, 1))
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1033 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1034 if (GET_CODE (op) == XOR
1035 && XEXP (op, 1) == const1_rtx
1036 && nonzero_bits (XEXP (op, 0), mode) == 1)
1037 return plus_constant (mode, XEXP (op, 0), -1);
1039 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1040 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1041 if (GET_CODE (op) == LT
1042 && XEXP (op, 1) == const0_rtx
1043 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1045 machine_mode inner = GET_MODE (XEXP (op, 0));
1046 int isize = GET_MODE_PRECISION (inner);
1047 if (STORE_FLAG_VALUE == 1)
1049 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1050 GEN_INT (isize - 1));
1051 if (mode == inner)
1052 return temp;
1053 if (GET_MODE_PRECISION (mode) > isize)
1054 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1055 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1057 else if (STORE_FLAG_VALUE == -1)
1059 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1060 GEN_INT (isize - 1));
1061 if (mode == inner)
1062 return temp;
1063 if (GET_MODE_PRECISION (mode) > isize)
1064 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1065 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1068 break;
1070 case TRUNCATE:
1071 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1072 with the umulXi3_highpart patterns. */
1073 if (GET_CODE (op) == LSHIFTRT
1074 && GET_CODE (XEXP (op, 0)) == MULT)
1075 break;
1077 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1079 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1081 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1082 if (temp)
1083 return temp;
1085 /* We can't handle truncation to a partial integer mode here
1086 because we don't know the real bitsize of the partial
1087 integer mode. */
1088 break;
1091 if (GET_MODE (op) != VOIDmode)
1093 temp = simplify_truncation (mode, op, GET_MODE (op));
1094 if (temp)
1095 return temp;
1098 /* If we know that the value is already truncated, we can
1099 replace the TRUNCATE with a SUBREG. */
1100 if (GET_MODE_NUNITS (mode) == 1
1101 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1102 || truncated_to_mode (mode, op)))
1104 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 if (temp)
1106 return temp;
1109 /* A truncate of a comparison can be replaced with a subreg if
1110 STORE_FLAG_VALUE permits. This is like the previous test,
1111 but it works even if the comparison is done in a mode larger
1112 than HOST_BITS_PER_WIDE_INT. */
1113 if (HWI_COMPUTABLE_MODE_P (mode)
1114 && COMPARISON_P (op)
1115 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1117 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1118 if (temp)
1119 return temp;
1122 /* A truncate of a memory is just loading the low part of the memory
1123 if we are not changing the meaning of the address. */
1124 if (GET_CODE (op) == MEM
1125 && !VECTOR_MODE_P (mode)
1126 && !MEM_VOLATILE_P (op)
1127 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1134 break;
1136 case FLOAT_TRUNCATE:
1137 if (DECIMAL_FLOAT_MODE_P (mode))
1138 break;
1140 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1141 if (GET_CODE (op) == FLOAT_EXTEND
1142 && GET_MODE (XEXP (op, 0)) == mode)
1143 return XEXP (op, 0);
1145 /* (float_truncate:SF (float_truncate:DF foo:XF))
1146 = (float_truncate:SF foo:XF).
1147 This may eliminate double rounding, so it is unsafe.
1149 (float_truncate:SF (float_extend:XF foo:DF))
1150 = (float_truncate:SF foo:DF).
1152 (float_truncate:DF (float_extend:XF foo:SF))
 1153 = (float_extend:DF foo:SF). */
1154 if ((GET_CODE (op) == FLOAT_TRUNCATE
1155 && flag_unsafe_math_optimizations)
1156 || GET_CODE (op) == FLOAT_EXTEND)
1157 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1158 0)))
1159 > GET_MODE_SIZE (mode)
1160 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1161 mode,
1162 XEXP (op, 0), mode);
1164 /* (float_truncate (float x)) is (float x) */
1165 if (GET_CODE (op) == FLOAT
1166 && (flag_unsafe_math_optimizations
1167 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1168 && ((unsigned)significand_size (GET_MODE (op))
1169 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1170 - num_sign_bit_copies (XEXP (op, 0),
1171 GET_MODE (XEXP (op, 0))))))))
1172 return simplify_gen_unary (FLOAT, mode,
1173 XEXP (op, 0),
1174 GET_MODE (XEXP (op, 0)));
1176 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1177 (OP:SF foo:SF) if OP is NEG or ABS. */
1178 if ((GET_CODE (op) == ABS
1179 || GET_CODE (op) == NEG)
1180 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1181 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1182 return simplify_gen_unary (GET_CODE (op), mode,
1183 XEXP (XEXP (op, 0), 0), mode);
1185 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1186 is (float_truncate:SF x). */
1187 if (GET_CODE (op) == SUBREG
1188 && subreg_lowpart_p (op)
1189 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1190 return SUBREG_REG (op);
1191 break;
1193 case FLOAT_EXTEND:
1194 if (DECIMAL_FLOAT_MODE_P (mode))
1195 break;
1197 /* (float_extend (float_extend x)) is (float_extend x)
1199 (float_extend (float x)) is (float x) assuming that double
 1200 rounding can't happen. */
1202 if (GET_CODE (op) == FLOAT_EXTEND
1203 || (GET_CODE (op) == FLOAT
1204 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1205 && ((unsigned)significand_size (GET_MODE (op))
1206 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1207 - num_sign_bit_copies (XEXP (op, 0),
1208 GET_MODE (XEXP (op, 0)))))))
1209 return simplify_gen_unary (GET_CODE (op), mode,
1210 XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1213 break;
1215 case ABS:
1216 /* (abs (neg <foo>)) -> (abs <foo>) */
1217 if (GET_CODE (op) == NEG)
1218 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1219 GET_MODE (XEXP (op, 0)));
1221 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1222 do nothing. */
1223 if (GET_MODE (op) == VOIDmode)
1224 break;
1226 /* If operand is something known to be positive, ignore the ABS. */
1227 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1228 || val_signbit_known_clear_p (GET_MODE (op),
1229 nonzero_bits (op, GET_MODE (op))))
1230 return op;
1232 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1233 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1234 return gen_rtx_NEG (mode, op);
1236 break;
1238 case FFS:
1239 /* (ffs (*_extend <X>)) = (ffs <X>) */
1240 if (GET_CODE (op) == SIGN_EXTEND
1241 || GET_CODE (op) == ZERO_EXTEND)
1242 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1243 GET_MODE (XEXP (op, 0)));
1244 break;
1246 case POPCOUNT:
1247 switch (GET_CODE (op))
1249 case BSWAP:
1250 case ZERO_EXTEND:
1251 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1252 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1255 case ROTATE:
1256 case ROTATERT:
1257 /* Rotations don't affect popcount. */
1258 if (!side_effects_p (XEXP (op, 1)))
1259 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1260 GET_MODE (XEXP (op, 0)));
1261 break;
1263 default:
1264 break;
1266 break;
1268 case PARITY:
1269 switch (GET_CODE (op))
1271 case NOT:
1272 case BSWAP:
1273 case ZERO_EXTEND:
1274 case SIGN_EXTEND:
1275 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 case ROTATE:
1279 case ROTATERT:
1280 /* Rotations don't affect parity. */
1281 if (!side_effects_p (XEXP (op, 1)))
1282 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1286 default:
1287 break;
1289 break;
1291 case BSWAP:
1292 /* (bswap (bswap x)) -> x. */
1293 if (GET_CODE (op) == BSWAP)
1294 return XEXP (op, 0);
1295 break;
1297 case FLOAT:
1298 /* (float (sign_extend <X>)) = (float <X>). */
1299 if (GET_CODE (op) == SIGN_EXTEND)
1300 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1301 GET_MODE (XEXP (op, 0)));
1302 break;
1304 case SIGN_EXTEND:
1305 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1306 becomes just the MINUS if its mode is MODE. This allows
1307 folding switch statements on machines using casesi (such as
1308 the VAX). */
1309 if (GET_CODE (op) == TRUNCATE
1310 && GET_MODE (XEXP (op, 0)) == mode
1311 && GET_CODE (XEXP (op, 0)) == MINUS
1312 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1313 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1314 return XEXP (op, 0);
1316 /* Extending a widening multiplication should be canonicalized to
1317 a wider widening multiplication. */
1318 if (GET_CODE (op) == MULT)
1320 rtx lhs = XEXP (op, 0);
1321 rtx rhs = XEXP (op, 1);
1322 enum rtx_code lcode = GET_CODE (lhs);
1323 enum rtx_code rcode = GET_CODE (rhs);
1325 /* Widening multiplies usually extend both operands, but sometimes
1326 they use a shift to extract a portion of a register. */
1327 if ((lcode == SIGN_EXTEND
1328 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1329 && (rcode == SIGN_EXTEND
1330 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1332 machine_mode lmode = GET_MODE (lhs);
1333 machine_mode rmode = GET_MODE (rhs);
1334 int bits;
1336 if (lcode == ASHIFTRT)
1337 /* Number of bits not shifted off the end. */
1338 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1339 else /* lcode == SIGN_EXTEND */
1340 /* Size of inner mode. */
1341 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1343 if (rcode == ASHIFTRT)
1344 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1345 else /* rcode == SIGN_EXTEND */
1346 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1348 /* We can only widen multiplies if the result is mathematically
1349 equivalent. I.e. if overflow was impossible. */
1350 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1351 return simplify_gen_binary
1352 (MULT, mode,
1353 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1354 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1358 /* Check for a sign extension of a subreg of a promoted
1359 variable, where the promotion is sign-extended, and the
1360 target mode is the same as the variable's promotion. */
1361 if (GET_CODE (op) == SUBREG
1362 && SUBREG_PROMOTED_VAR_P (op)
1363 && SUBREG_PROMOTED_SIGNED_P (op)
1364 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1366 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1367 if (temp)
1368 return temp;
1371 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1372 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1373 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1375 gcc_assert (GET_MODE_PRECISION (mode)
1376 > GET_MODE_PRECISION (GET_MODE (op)));
1377 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1381 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1382 is (sign_extend:M (subreg:O <X>)) if there is mode with
1383 GET_MODE_BITSIZE (N) - I bits.
1384 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1385 is similarly (zero_extend:M (subreg:O <X>)). */
1386 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1387 && GET_CODE (XEXP (op, 0)) == ASHIFT
1388 && CONST_INT_P (XEXP (op, 1))
1389 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1390 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1392 machine_mode tmode
1393 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1394 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1395 gcc_assert (GET_MODE_BITSIZE (mode)
1396 > GET_MODE_BITSIZE (GET_MODE (op)));
1397 if (tmode != BLKmode)
1399 rtx inner =
1400 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1401 if (inner)
1402 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1403 ? SIGN_EXTEND : ZERO_EXTEND,
1404 mode, inner, tmode);
1408 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1409 /* As we do not know which address space the pointer is referring to,
1410 we can do this only if the target does not support different pointer
1411 or address modes depending on the address space. */
1412 if (target_default_pointer_address_modes_p ()
1413 && ! POINTERS_EXTEND_UNSIGNED
1414 && mode == Pmode && GET_MODE (op) == ptr_mode
1415 && (CONSTANT_P (op)
1416 || (GET_CODE (op) == SUBREG
1417 && REG_P (SUBREG_REG (op))
1418 && REG_POINTER (SUBREG_REG (op))
1419 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1420 return convert_memory_address (Pmode, op);
1421 #endif
1422 break;
1424 case ZERO_EXTEND:
1425 /* Check for a zero extension of a subreg of a promoted
1426 variable, where the promotion is zero-extended, and the
1427 target mode is the same as the variable's promotion. */
1428 if (GET_CODE (op) == SUBREG
1429 && SUBREG_PROMOTED_VAR_P (op)
1430 && SUBREG_PROMOTED_UNSIGNED_P (op)
1431 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1433 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1434 if (temp)
1435 return temp;
1438 /* Extending a widening multiplication should be canonicalized to
1439 a wider widening multiplication. */
1440 if (GET_CODE (op) == MULT)
1442 rtx lhs = XEXP (op, 0);
1443 rtx rhs = XEXP (op, 1);
1444 enum rtx_code lcode = GET_CODE (lhs);
1445 enum rtx_code rcode = GET_CODE (rhs);
1447 /* Widening multiplies usually extend both operands, but sometimes
1448 they use a shift to extract a portion of a register. */
1449 if ((lcode == ZERO_EXTEND
1450 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1451 && (rcode == ZERO_EXTEND
1452 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1454 machine_mode lmode = GET_MODE (lhs);
1455 machine_mode rmode = GET_MODE (rhs);
1456 int bits;
1458 if (lcode == LSHIFTRT)
1459 /* Number of bits not shifted off the end. */
1460 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1461 else /* lcode == ZERO_EXTEND */
1462 /* Size of inner mode. */
1463 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1465 if (rcode == LSHIFTRT)
1466 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1467 else /* rcode == ZERO_EXTEND */
1468 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1470 /* We can only widen multiplies if the result is mathematically
1471 equivalent. I.e. if overflow was impossible. */
1472 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1473 return simplify_gen_binary
1474 (MULT, mode,
1475 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1476 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1480 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op) == ZERO_EXTEND)
1482 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1483 GET_MODE (XEXP (op, 0)));
1485 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (zero_extend:M (subreg:O <X>)) if there is mode with
1487 GET_MODE_PRECISION (N) - I bits. */
1488 if (GET_CODE (op) == LSHIFTRT
1489 && GET_CODE (XEXP (op, 0)) == ASHIFT
1490 && CONST_INT_P (XEXP (op, 1))
1491 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1492 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1494 machine_mode tmode
1495 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1496 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1497 if (tmode != BLKmode)
1499 rtx inner =
1500 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1501 if (inner)
1502 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1506 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1507 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1508 of mode N. E.g.
1509 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1510 (and:SI (reg:SI) (const_int 63)). */
1511 if (GET_CODE (op) == SUBREG
1512 && GET_MODE_PRECISION (GET_MODE (op))
1513 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1514 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1515 <= HOST_BITS_PER_WIDE_INT
1516 && GET_MODE_PRECISION (mode)
1517 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1518 && subreg_lowpart_p (op)
1519 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1520 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1522 if (GET_MODE_PRECISION (mode)
1523 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1524 return SUBREG_REG (op);
1525 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1526 GET_MODE (SUBREG_REG (op)));
1529 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && POINTERS_EXTEND_UNSIGNED > 0
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1541 return convert_memory_address (Pmode, op);
1542 #endif
1543 break;
1545 default:
1546 break;
1549 return 0;
1552 /* Try to compute the value of a unary operation CODE whose output mode is to
1553 be MODE with input operand OP whose mode was originally OP_MODE.
1554 Return zero if the value cannot be computed. */
1556 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1557 rtx op, machine_mode op_mode)
1559 unsigned int width = GET_MODE_PRECISION (mode);
1561 if (code == VEC_DUPLICATE)
1563 gcc_assert (VECTOR_MODE_P (mode));
1564 if (GET_MODE (op) != VOIDmode)
1566 if (!VECTOR_MODE_P (GET_MODE (op)))
1567 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1568 else
1569 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1570 (GET_MODE (op)));
1572 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1573 || GET_CODE (op) == CONST_VECTOR)
1575 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1576 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1577 rtvec v = rtvec_alloc (n_elts);
1578 unsigned int i;
1580 if (GET_CODE (op) != CONST_VECTOR)
1581 for (i = 0; i < n_elts; i++)
1582 RTVEC_ELT (v, i) = op;
1583 else
1585 machine_mode inmode = GET_MODE (op);
1586 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1587 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1589 gcc_assert (in_n_elts < n_elts);
1590 gcc_assert ((n_elts % in_n_elts) == 0);
1591 for (i = 0; i < n_elts; i++)
1592 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1594 return gen_rtx_CONST_VECTOR (mode, v);
1598 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1600 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1601 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1602 machine_mode opmode = GET_MODE (op);
1603 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1604 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1605 rtvec v = rtvec_alloc (n_elts);
1606 unsigned int i;
1608 gcc_assert (op_n_elts == n_elts);
1609 for (i = 0; i < n_elts; i++)
1611 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1612 CONST_VECTOR_ELT (op, i),
1613 GET_MODE_INNER (opmode));
1614 if (!x)
1615 return 0;
1616 RTVEC_ELT (v, i) = x;
1618 return gen_rtx_CONST_VECTOR (mode, v);
1621 /* The order of these tests is critical so that, for example, we don't
1622 check the wrong mode (input vs. output) for a conversion operation,
1623 such as FIX. At some point, this should be simplified. */
1625 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1627 REAL_VALUE_TYPE d;
1629 if (op_mode == VOIDmode)
 1631 /* A CONST_INT has VOIDmode as its mode. We assume that all
 1632 the bits of the constant are significant, though this is
 1633 a dangerous assumption: CONST_INTs are often
 1634 created and used with garbage in the bits outside of the
 1635 precision of the implied mode of the const_int. */
1636 op_mode = MAX_MODE_INT;
1639 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1640 d = real_value_truncate (mode, d);
1641 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1643 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1645 REAL_VALUE_TYPE d;
1647 if (op_mode == VOIDmode)
 1649 /* A CONST_INT has VOIDmode as its mode. We assume that all
 1650 the bits of the constant are significant, though this is
 1651 a dangerous assumption: CONST_INTs are often
 1652 created and used with garbage in the bits outside of the
 1653 precision of the implied mode of the const_int. */
1654 op_mode = MAX_MODE_INT;
1657 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1658 d = real_value_truncate (mode, d);
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1662 if (CONST_SCALAR_INT_P (op) && width > 0)
1664 wide_int result;
1665 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1666 rtx_mode_t op0 = std::make_pair (op, imode);
1667 int int_value;
1669 #if TARGET_SUPPORTS_WIDE_INT == 0
 1670 /* This assert keeps the simplification from producing a result
 1671 that cannot be represented in a CONST_DOUBLE, but a lot of
 1672 upstream callers expect that this function never fails to
 1673 simplify something, so if you added this check to the test
 1674 above the code would just die later anyway. If this assert
 1675 fires, you need to make the port support wide int. */
1676 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1677 #endif
1679 switch (code)
1681 case NOT:
1682 result = wi::bit_not (op0);
1683 break;
1685 case NEG:
1686 result = wi::neg (op0);
1687 break;
1689 case ABS:
1690 result = wi::abs (op0);
1691 break;
1693 case FFS:
1694 result = wi::shwi (wi::ffs (op0), mode);
1695 break;
1697 case CLZ:
1698 if (wi::ne_p (op0, 0))
1699 int_value = wi::clz (op0);
1700 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1701 int_value = GET_MODE_PRECISION (mode);
1702 result = wi::shwi (int_value, mode);
1703 break;
1705 case CLRSB:
1706 result = wi::shwi (wi::clrsb (op0), mode);
1707 break;
1709 case CTZ:
1710 if (wi::ne_p (op0, 0))
1711 int_value = wi::ctz (op0);
1712 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1713 int_value = GET_MODE_PRECISION (mode);
1714 result = wi::shwi (int_value, mode);
1715 break;
1717 case POPCOUNT:
1718 result = wi::shwi (wi::popcount (op0), mode);
1719 break;
1721 case PARITY:
1722 result = wi::shwi (wi::parity (op0), mode);
1723 break;
1725 case BSWAP:
1726 result = wide_int (op0).bswap ();
1727 break;
1729 case TRUNCATE:
1730 case ZERO_EXTEND:
1731 result = wide_int::from (op0, width, UNSIGNED);
1732 break;
1734 case SIGN_EXTEND:
1735 result = wide_int::from (op0, width, SIGNED);
1736 break;
1738 case SQRT:
1739 default:
1740 return 0;
1743 return immed_wide_int_const (result, mode);
1746 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1747 && SCALAR_FLOAT_MODE_P (mode)
1748 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1750 REAL_VALUE_TYPE d;
1751 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1753 switch (code)
1755 case SQRT:
1756 return 0;
1757 case ABS:
1758 d = real_value_abs (&d);
1759 break;
1760 case NEG:
1761 d = real_value_negate (&d);
1762 break;
1763 case FLOAT_TRUNCATE:
1764 d = real_value_truncate (mode, d);
1765 break;
1766 case FLOAT_EXTEND:
1767 /* All this does is change the mode, unless changing
1768 mode class. */
1769 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1770 real_convert (&d, mode, &d);
1771 break;
1772 case FIX:
1773 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1774 break;
1775 case NOT:
1777 long tmp[4];
1778 int i;
1780 real_to_target (tmp, &d, GET_MODE (op));
1781 for (i = 0; i < 4; i++)
1782 tmp[i] = ~tmp[i];
1783 real_from_target (&d, tmp, mode);
1784 break;
1786 default:
1787 gcc_unreachable ();
1789 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1791 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1792 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1793 && GET_MODE_CLASS (mode) == MODE_INT
1794 && width > 0)
1796 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1797 operators are intentionally left unspecified (to ease implementation
1798 by target backends), for consistency, this routine implements the
1799 same semantics for constant folding as used by the middle-end. */
1801 /* This was formerly used only for non-IEEE float.
1802 eggert@twinsun.com says it is safe for IEEE also. */
1803 REAL_VALUE_TYPE x, t;
1804 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1805 wide_int wmax, wmin;
 1806 /* This is part of the ABI of real_to_integer, but we check
1807 things before making this call. */
1808 bool fail;
1810 switch (code)
1812 case FIX:
1813 if (REAL_VALUE_ISNAN (x))
1814 return const0_rtx;
1816 /* Test against the signed upper bound. */
1817 wmax = wi::max_value (width, SIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1822 /* Test against the signed lower bound. */
1823 wmin = wi::min_value (width, SIGNED);
1824 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1825 if (REAL_VALUES_LESS (x, t))
1826 return immed_wide_int_const (wmin, mode);
1828 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1829 break;
1831 case UNSIGNED_FIX:
1832 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1833 return const0_rtx;
1835 /* Test against the unsigned upper bound. */
1836 wmax = wi::max_value (width, UNSIGNED);
1837 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1838 if (REAL_VALUES_LESS (t, x))
1839 return immed_wide_int_const (wmax, mode);
1841 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1842 mode);
1843 break;
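/* For illustration, assuming SImode: FIX of 3.5e9 exceeds the signed
maximum and folds to 0x7fffffff, FIX of NaN folds to 0, and
UNSIGNED_FIX of any negative input folds to 0.  */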
1845 default:
1846 gcc_unreachable ();
1850 return NULL_RTX;
1853 /* Subroutine of simplify_binary_operation to simplify a binary operation
1854 CODE that can commute with byte swapping, with result mode MODE and
1855 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1856 Return zero if no simplification or canonicalization is possible. */
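/* For example (SImode assumed for illustration),
(and (bswap:SI x) (const_int 0xff)) can become
(bswap:SI (and:SI x (const_int 0xff000000))).  */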
1858 static rtx
1859 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1860 rtx op0, rtx op1)
1862 rtx tem;
1864 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1865 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1868 simplify_gen_unary (BSWAP, mode, op1, mode));
1869 return simplify_gen_unary (BSWAP, mode, tem, mode);
1872 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1873 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1875 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1876 return simplify_gen_unary (BSWAP, mode, tem, mode);
1879 return NULL_RTX;
1882 /* Subroutine of simplify_binary_operation to simplify a commutative,
1883 associative binary operation CODE with result mode MODE, operating
1884 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1885 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1886 canonicalization is possible. */
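/* For example, (plus (plus x (const_int 4)) y) is canonicalized here
to (plus (plus x y) (const_int 4)), keeping the constant last.  */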
1888 static rtx
1889 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1890 rtx op0, rtx op1)
1892 rtx tem;
1894 /* Linearize the operator to the left. */
1895 if (GET_CODE (op1) == code)
1897 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1898 if (GET_CODE (op0) == code)
1900 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1901 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1904 /* "a op (b op c)" becomes "(b op c) op a". */
1905 if (! swap_commutative_operands_p (op1, op0))
1906 return simplify_gen_binary (code, mode, op1, op0);
1908 tem = op0;
1909 op0 = op1;
1910 op1 = tem;
1913 if (GET_CODE (op0) == code)
1915 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1916 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1918 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1919 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1922 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1923 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1924 if (tem != 0)
1925 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1927 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1928 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1929 if (tem != 0)
1930 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1933 return 0;
1937 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1938 and OP1. Return 0 if no simplification is possible.
1940 Don't use this for relational operations such as EQ or LT.
1941 Use simplify_relational_operation instead. */
1942 rtx
1943 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1946 rtx trueop0, trueop1;
1947 rtx tem;
1949 /* Relational operations don't work here. We must know the mode
1950 of the operands in order to do the comparison correctly.
1951 Assuming a full word can give incorrect results.
1952 Consider comparing 128 with -128 in QImode. */
1953 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1954 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1956 /* Make sure the constant is second. */
1957 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1958 && swap_commutative_operands_p (op0, op1))
1960 tem = op0, op0 = op1, op1 = tem;
1963 trueop0 = avoid_constant_pool_reference (op0);
1964 trueop1 = avoid_constant_pool_reference (op1);
1966 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1967 if (tem)
1968 return tem;
1969 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1972 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1973 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1974 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1975 actual constants. */
1977 static rtx
1978 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1979 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1981 rtx tem, reversed, opleft, opright;
1982 HOST_WIDE_INT val;
1983 unsigned int width = GET_MODE_PRECISION (mode);
1985 /* Even if we can't compute a constant result,
1986 there are some cases worth simplifying. */
1988 switch (code)
1990 case PLUS:
1991 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1992 when x is NaN, infinite, or finite and nonzero. They aren't
1993 when x is -0 and the rounding mode is not towards -infinity,
1994 since (-0) + 0 is then 0. */
1995 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1996 return op0;
1998 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1999 transformations are safe even for IEEE. */
2000 if (GET_CODE (op0) == NEG)
2001 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2002 else if (GET_CODE (op1) == NEG)
2003 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2005 /* (~a) + 1 -> -a */
2006 if (INTEGRAL_MODE_P (mode)
2007 && GET_CODE (op0) == NOT
2008 && trueop1 == const1_rtx)
2009 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2011 /* Handle both-operands-constant cases. We can only add
2012 CONST_INTs to constants since the sum of relocatable symbols
2013 can't be handled by most assemblers. Don't add CONST_INT
2014 to CONST_INT since overflow won't be computed properly if wider
2015 than HOST_BITS_PER_WIDE_INT. */
2017 if ((GET_CODE (op0) == CONST
2018 || GET_CODE (op0) == SYMBOL_REF
2019 || GET_CODE (op0) == LABEL_REF)
2020 && CONST_INT_P (op1))
2021 return plus_constant (mode, op0, INTVAL (op1));
2022 else if ((GET_CODE (op1) == CONST
2023 || GET_CODE (op1) == SYMBOL_REF
2024 || GET_CODE (op1) == LABEL_REF)
2025 && CONST_INT_P (op0))
2026 return plus_constant (mode, op1, INTVAL (op0));
2028 /* See if this is something like X * C - X or vice versa or
2029 if the multiplication is written as a shift. If so, we can
2030 distribute and make a new multiply, shift, or maybe just
2031 have X (if C is 2 in the example above). But don't make
2032 something more expensive than we had before. */
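/* For instance, (plus (ashift x (const_int 2)) x) yields coeff0 = 4
and coeff1 = 1 below, so it can fold to (mult x (const_int 5)),
provided the cost check below is satisfied.  */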
2034 if (SCALAR_INT_MODE_P (mode))
2036 rtx lhs = op0, rhs = op1;
2038 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2039 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2041 if (GET_CODE (lhs) == NEG)
2043 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2044 lhs = XEXP (lhs, 0);
2046 else if (GET_CODE (lhs) == MULT
2047 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2049 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2050 lhs = XEXP (lhs, 0);
2052 else if (GET_CODE (lhs) == ASHIFT
2053 && CONST_INT_P (XEXP (lhs, 1))
2054 && INTVAL (XEXP (lhs, 1)) >= 0
2055 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2057 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2058 GET_MODE_PRECISION (mode));
2059 lhs = XEXP (lhs, 0);
2062 if (GET_CODE (rhs) == NEG)
2064 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2065 rhs = XEXP (rhs, 0);
2067 else if (GET_CODE (rhs) == MULT
2068 && CONST_INT_P (XEXP (rhs, 1)))
2070 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2071 rhs = XEXP (rhs, 0);
2073 else if (GET_CODE (rhs) == ASHIFT
2074 && CONST_INT_P (XEXP (rhs, 1))
2075 && INTVAL (XEXP (rhs, 1)) >= 0
2076 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2078 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2079 GET_MODE_PRECISION (mode));
2080 rhs = XEXP (rhs, 0);
2083 if (rtx_equal_p (lhs, rhs))
2085 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2086 rtx coeff;
2087 bool speed = optimize_function_for_speed_p (cfun);
2089 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2091 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2092 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2093 ? tem : 0;
2097 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2098 if (CONST_SCALAR_INT_P (op1)
2099 && GET_CODE (op0) == XOR
2100 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2101 && mode_signbit_p (mode, op1))
2102 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2103 simplify_gen_binary (XOR, mode, op1,
2104 XEXP (op0, 1)));
2106 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2107 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2108 && GET_CODE (op0) == MULT
2109 && GET_CODE (XEXP (op0, 0)) == NEG)
2111 rtx in1, in2;
2113 in1 = XEXP (XEXP (op0, 0), 0);
2114 in2 = XEXP (op0, 1);
2115 return simplify_gen_binary (MINUS, mode, op1,
2116 simplify_gen_binary (MULT, mode,
2117 in1, in2));
2120 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2121 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2122 is 1. */
2123 if (COMPARISON_P (op0)
2124 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2125 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2126 && (reversed = reversed_comparison (op0, mode)))
2127 return
2128 simplify_gen_unary (NEG, mode, reversed, mode);
2130 /* If one of the operands is a PLUS or a MINUS, see if we can
2131 simplify this by the associative law.
2132 Don't use the associative law for floating point.
2133 The inaccuracy makes it nonassociative,
2134 and subtle programs can break if operations are associated. */
2136 if (INTEGRAL_MODE_P (mode)
2137 && (plus_minus_operand_p (op0)
2138 || plus_minus_operand_p (op1))
2139 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2140 return tem;
2142 /* Reassociate floating point addition only when the user
2143 specifies associative math operations. */
2144 if (FLOAT_MODE_P (mode)
2145 && flag_associative_math)
2147 tem = simplify_associative_operation (code, mode, op0, op1);
2148 if (tem)
2149 return tem;
2151 break;
2153 case COMPARE:
2154 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2155 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2156 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2157 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2159 rtx xop00 = XEXP (op0, 0);
2160 rtx xop10 = XEXP (op1, 0);
2162 #ifdef HAVE_cc0
2163 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2164 #else
2165 if (REG_P (xop00) && REG_P (xop10)
2166 && GET_MODE (xop00) == GET_MODE (xop10)
2167 && REGNO (xop00) == REGNO (xop10)
2168 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2169 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2170 #endif
2171 return xop00;
2173 break;
2175 case MINUS:
2176 /* We can't assume x-x is 0 even with non-IEEE floating point,
2177 but since it is zero except in very strange circumstances, we
2178 will treat it as zero with -ffinite-math-only. */
2179 if (rtx_equal_p (trueop0, trueop1)
2180 && ! side_effects_p (op0)
2181 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2182 return CONST0_RTX (mode);
2184 /* Change subtraction from zero into negation. (0 - x) is the
2185 same as -x when x is NaN, infinite, or finite and nonzero.
2186 But if the mode has signed zeros, and does not round towards
2187 -infinity, then 0 - 0 is 0, not -0. */
2188 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2189 return simplify_gen_unary (NEG, mode, op1, mode);
2191 /* (-1 - a) is ~a. */
2192 if (trueop0 == constm1_rtx)
2193 return simplify_gen_unary (NOT, mode, op1, mode);
2195 /* Subtracting 0 has no effect unless the mode has signed zeros
2196 and supports rounding towards -infinity. In such a case,
2197 0 - 0 is -0. */
2198 if (!(HONOR_SIGNED_ZEROS (mode)
2199 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2200 && trueop1 == CONST0_RTX (mode))
2201 return op0;
2203 /* See if this is something like X * C - X or vice versa or
2204 if the multiplication is written as a shift. If so, we can
2205 distribute and make a new multiply, shift, or maybe just
2206 have X (if C is 2 in the example above). But don't make
2207 something more expensive than we had before. */
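/* For instance, (minus (mult x (const_int 3)) x) yields coeff0 = 3
and negcoeff1 = -1 below, folding to (mult x (const_int 2)) when the
result is not more expensive.  */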
2209 if (SCALAR_INT_MODE_P (mode))
2211 rtx lhs = op0, rhs = op1;
2213 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2214 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2216 if (GET_CODE (lhs) == NEG)
2218 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2219 lhs = XEXP (lhs, 0);
2221 else if (GET_CODE (lhs) == MULT
2222 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2224 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2225 lhs = XEXP (lhs, 0);
2227 else if (GET_CODE (lhs) == ASHIFT
2228 && CONST_INT_P (XEXP (lhs, 1))
2229 && INTVAL (XEXP (lhs, 1)) >= 0
2230 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2232 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2233 GET_MODE_PRECISION (mode));
2234 lhs = XEXP (lhs, 0);
2237 if (GET_CODE (rhs) == NEG)
2239 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2240 rhs = XEXP (rhs, 0);
2242 else if (GET_CODE (rhs) == MULT
2243 && CONST_INT_P (XEXP (rhs, 1)))
2245 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2246 rhs = XEXP (rhs, 0);
2248 else if (GET_CODE (rhs) == ASHIFT
2249 && CONST_INT_P (XEXP (rhs, 1))
2250 && INTVAL (XEXP (rhs, 1)) >= 0
2251 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2253 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2254 GET_MODE_PRECISION (mode));
2255 negcoeff1 = -negcoeff1;
2256 rhs = XEXP (rhs, 0);
2259 if (rtx_equal_p (lhs, rhs))
2261 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2262 rtx coeff;
2263 bool speed = optimize_function_for_speed_p (cfun);
2265 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2267 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2268 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2269 ? tem : 0;
2273 /* (a - (-b)) -> (a + b). True even for IEEE. */
2274 if (GET_CODE (op1) == NEG)
2275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2277 /* (-x - c) may be simplified as (-c - x). */
2278 if (GET_CODE (op0) == NEG
2279 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2281 tem = simplify_unary_operation (NEG, mode, op1, mode);
2282 if (tem)
2283 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2286 /* Don't let a relocatable value get a negative coeff. */
2287 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2288 return simplify_gen_binary (PLUS, mode,
2289 op0,
2290 neg_const_int (mode, op1));
2292 /* (x - (x & y)) -> (x & ~y) */
2293 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2295 if (rtx_equal_p (op0, XEXP (op1, 0)))
2297 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2298 GET_MODE (XEXP (op1, 1)));
2299 return simplify_gen_binary (AND, mode, op0, tem);
2301 if (rtx_equal_p (op0, XEXP (op1, 1)))
2303 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2304 GET_MODE (XEXP (op1, 0)));
2305 return simplify_gen_binary (AND, mode, op0, tem);
2309 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2310 by reversing the comparison code if valid. */
2311 if (STORE_FLAG_VALUE == 1
2312 && trueop0 == const1_rtx
2313 && COMPARISON_P (op1)
2314 && (reversed = reversed_comparison (op1, mode)))
2315 return reversed;
2317 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2318 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2319 && GET_CODE (op1) == MULT
2320 && GET_CODE (XEXP (op1, 0)) == NEG)
2322 rtx in1, in2;
2324 in1 = XEXP (XEXP (op1, 0), 0);
2325 in2 = XEXP (op1, 1);
2326 return simplify_gen_binary (PLUS, mode,
2327 simplify_gen_binary (MULT, mode,
2328 in1, in2),
2329 op0);
2332 /* Canonicalize (minus (neg A) (mult B C)) to
2333 (minus (mult (neg B) C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2335 && GET_CODE (op1) == MULT
2336 && GET_CODE (op0) == NEG)
2338 rtx in1, in2;
2340 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2341 in2 = XEXP (op1, 1);
2342 return simplify_gen_binary (MINUS, mode,
2343 simplify_gen_binary (MULT, mode,
2344 in1, in2),
2345 XEXP (op0, 0));
2348 /* If one of the operands is a PLUS or a MINUS, see if we can
2349 simplify this by the associative law. This will, for example,
2350 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2351 Don't use the associative law for floating point.
2352 The inaccuracy makes it nonassociative,
2353 and subtle programs can break if operations are associated. */
2355 if (INTEGRAL_MODE_P (mode)
2356 && (plus_minus_operand_p (op0)
2357 || plus_minus_operand_p (op1))
2358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2359 return tem;
2360 break;
2362 case MULT:
2363 if (trueop1 == constm1_rtx)
2364 return simplify_gen_unary (NEG, mode, op0, mode);
2366 if (GET_CODE (op0) == NEG)
2368 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2369 /* If op1 is a MULT as well and simplify_unary_operation
2370 just moved the NEG to the second operand, simplify_gen_binary
2371 below could, through simplify_associative_operation, move
2372 the NEG around again and recurse endlessly. */
2373 if (temp
2374 && GET_CODE (op1) == MULT
2375 && GET_CODE (temp) == MULT
2376 && XEXP (op1, 0) == XEXP (temp, 0)
2377 && GET_CODE (XEXP (temp, 1)) == NEG
2378 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2379 temp = NULL_RTX;
2380 if (temp)
2381 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2383 if (GET_CODE (op1) == NEG)
2385 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2386 /* If op0 is a MULT as well and simplify_unary_operation
2387 just moved the NEG to the second operand, simplify_gen_binary
2388 below could, through simplify_associative_operation, move
2389 the NEG around again and recurse endlessly. */
2390 if (temp
2391 && GET_CODE (op0) == MULT
2392 && GET_CODE (temp) == MULT
2393 && XEXP (op0, 0) == XEXP (temp, 0)
2394 && GET_CODE (XEXP (temp, 1)) == NEG
2395 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2396 temp = NULL_RTX;
2397 if (temp)
2398 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2401 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2402 x is NaN, since x * 0 is then also NaN. Nor is it valid
2403 when the mode has signed zeros, since multiplying a negative
2404 number by 0 will give -0, not 0. */
2405 if (!HONOR_NANS (mode)
2406 && !HONOR_SIGNED_ZEROS (mode)
2407 && trueop1 == CONST0_RTX (mode)
2408 && ! side_effects_p (op0))
2409 return op1;
2411 /* In IEEE floating point, x*1 is not equivalent to x for
2412 signalling NaNs. */
2413 if (!HONOR_SNANS (mode)
2414 && trueop1 == CONST1_RTX (mode))
2415 return op0;
2417 /* Convert multiply by constant power of two into shift. */
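/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */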
2418 if (CONST_SCALAR_INT_P (trueop1))
2420 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2421 if (val >= 0)
2422 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2425 /* x*2 is x+x and x*(-1) is -x */
2426 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2428 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2429 && GET_MODE (op0) == mode)
2431 REAL_VALUE_TYPE d;
2432 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2434 if (REAL_VALUES_EQUAL (d, dconst2))
2435 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2437 if (!HONOR_SNANS (mode)
2438 && REAL_VALUES_EQUAL (d, dconstm1))
2439 return simplify_gen_unary (NEG, mode, op0, mode);
2442 /* Optimize -x * -x as x * x. */
2443 if (FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == NEG
2445 && GET_CODE (op1) == NEG
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2450 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2451 if (SCALAR_FLOAT_MODE_P (mode)
2452 && GET_CODE (op0) == ABS
2453 && GET_CODE (op1) == ABS
2454 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2455 && !side_effects_p (XEXP (op0, 0)))
2456 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2458 /* Reassociate multiplication, but for floating point MULTs
2459 only when the user specifies unsafe math optimizations. */
2460 if (! FLOAT_MODE_P (mode)
2461 || flag_unsafe_math_optimizations)
2463 tem = simplify_associative_operation (code, mode, op0, op1);
2464 if (tem)
2465 return tem;
2467 break;
2469 case IOR:
2470 if (trueop1 == CONST0_RTX (mode))
2471 return op0;
2472 if (INTEGRAL_MODE_P (mode)
2473 && trueop1 == CONSTM1_RTX (mode)
2474 && !side_effects_p (op0))
2475 return op1;
2476 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2477 return op0;
2478 /* A | (~A) -> -1 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && SCALAR_INT_MODE_P (mode))
2483 return constm1_rtx;
2485 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2486 if (CONST_INT_P (op1)
2487 && HWI_COMPUTABLE_MODE_P (mode)
2488 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2489 && !side_effects_p (op0))
2490 return op1;
2492 /* Canonicalize (X & C1) | C2. */
2493 if (GET_CODE (op0) == AND
2494 && CONST_INT_P (trueop1)
2495 && CONST_INT_P (XEXP (op0, 1)))
2497 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2498 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2499 HOST_WIDE_INT c2 = INTVAL (trueop1);
2501 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2502 if ((c1 & c2) == c1
2503 && !side_effects_p (XEXP (op0, 0)))
2504 return trueop1;
2506 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2507 if (((c1|c2) & mask) == mask)
2508 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2510 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
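/* E.g. in SImode (chosen for illustration),
(ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
(ior (and x (const_int 0xf0)) (const_int 0x0f)).  */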
2511 if (((c1 & ~c2) & mask) != (c1 & mask))
2513 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2514 gen_int_mode (c1 & ~c2, mode));
2515 return simplify_gen_binary (IOR, mode, tem, op1);
2519 /* Convert (A & B) | A to A. */
2520 if (GET_CODE (op0) == AND
2521 && (rtx_equal_p (XEXP (op0, 0), op1)
2522 || rtx_equal_p (XEXP (op0, 1), op1))
2523 && ! side_effects_p (XEXP (op0, 0))
2524 && ! side_effects_p (XEXP (op0, 1)))
2525 return op1;
2527 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2528 mode precision to (rotate A CX). */
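/* E.g. (ior (ashift:SI x (const_int 8)) (lshiftrt:SI x (const_int 24)))
becomes (rotate:SI x (const_int 8)), since 8 + 24 equals the SImode
precision (SImode chosen for illustration).  */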
2530 if (GET_CODE (op1) == ASHIFT
2531 || GET_CODE (op1) == SUBREG)
2533 opleft = op1;
2534 opright = op0;
2536 else
2538 opright = op1;
2539 opleft = op0;
2542 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2543 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2544 && CONST_INT_P (XEXP (opleft, 1))
2545 && CONST_INT_P (XEXP (opright, 1))
2546 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2547 == GET_MODE_PRECISION (mode)))
2548 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2550 /* Same, but for ashift that has been "simplified" to a wider mode
2551 by simplify_shift_const. */
2553 if (GET_CODE (opleft) == SUBREG
2554 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2555 && GET_CODE (opright) == LSHIFTRT
2556 && GET_CODE (XEXP (opright, 0)) == SUBREG
2557 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2558 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2559 && (GET_MODE_SIZE (GET_MODE (opleft))
2560 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2561 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2562 SUBREG_REG (XEXP (opright, 0)))
2563 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2564 && CONST_INT_P (XEXP (opright, 1))
2565 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2566 == GET_MODE_PRECISION (mode)))
2567 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2568 XEXP (SUBREG_REG (opleft), 1));
2570 /* If we have (ior (and X C1) C2), simplify this by making
2571 C1 as small as possible if C1 actually changes. */
2572 if (CONST_INT_P (op1)
2573 && (HWI_COMPUTABLE_MODE_P (mode)
2574 || INTVAL (op1) > 0)
2575 && GET_CODE (op0) == AND
2576 && CONST_INT_P (XEXP (op0, 1))
2577 && CONST_INT_P (op1)
2578 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2580 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2581 gen_int_mode (UINTVAL (XEXP (op0, 1))
2582 & ~UINTVAL (op1),
2583 mode));
2584 return simplify_gen_binary (IOR, mode, tmp, op1);
2587 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2588 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2589 the PLUS does not affect any of the bits in OP1: then we can do
2590 the IOR as a PLUS and we can associate. This is valid if OP1
2591 can be safely shifted left C bits. */
2592 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2593 && GET_CODE (XEXP (op0, 0)) == PLUS
2594 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2595 && CONST_INT_P (XEXP (op0, 1))
2596 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2598 int count = INTVAL (XEXP (op0, 1));
2599 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2601 if (mask >> count == INTVAL (trueop1)
2602 && trunc_int_for_mode (mask, mode) == mask
2603 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2604 return simplify_gen_binary (ASHIFTRT, mode,
2605 plus_constant (mode, XEXP (op0, 0),
2606 mask),
2607 XEXP (op0, 1));
2610 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2611 if (tem)
2612 return tem;
2614 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617 break;
2619 case XOR:
2620 if (trueop1 == CONST0_RTX (mode))
2621 return op0;
2622 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2623 return simplify_gen_unary (NOT, mode, op0, mode);
2624 if (rtx_equal_p (trueop0, trueop1)
2625 && ! side_effects_p (op0)
2626 && GET_MODE_CLASS (mode) != MODE_CC)
2627 return CONST0_RTX (mode);
2629 /* Canonicalize XOR of the most significant bit to PLUS. */
2630 if (CONST_SCALAR_INT_P (op1)
2631 && mode_signbit_p (mode, op1))
2632 return simplify_gen_binary (PLUS, mode, op0, op1);
2633 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2634 if (CONST_SCALAR_INT_P (op1)
2635 && GET_CODE (op0) == PLUS
2636 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2637 && mode_signbit_p (mode, XEXP (op0, 1)))
2638 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2639 simplify_gen_binary (XOR, mode, op1,
2640 XEXP (op0, 1)));
2642 /* If we are XORing two things that have no bits in common,
2643 convert them into an IOR. This helps to detect rotation encoded
2644 using those methods and possibly other simplifications. */
2646 if (HWI_COMPUTABLE_MODE_P (mode)
2647 && (nonzero_bits (op0, mode)
2648 & nonzero_bits (op1, mode)) == 0)
2649 return (simplify_gen_binary (IOR, mode, op0, op1));
2651 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2652 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2653 (NOT y). */
2655 int num_negated = 0;
2657 if (GET_CODE (op0) == NOT)
2658 num_negated++, op0 = XEXP (op0, 0);
2659 if (GET_CODE (op1) == NOT)
2660 num_negated++, op1 = XEXP (op1, 0);
2662 if (num_negated == 2)
2663 return simplify_gen_binary (XOR, mode, op0, op1);
2664 else if (num_negated == 1)
2665 return simplify_gen_unary (NOT, mode,
2666 simplify_gen_binary (XOR, mode, op0, op1),
2667 mode);
2670 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2671 correspond to a machine insn or result in further simplifications
2672 if B is a constant. */
2674 if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 1), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 0), mode),
2680 op1);
2682 else if (GET_CODE (op0) == AND
2683 && rtx_equal_p (XEXP (op0, 0), op1)
2684 && ! side_effects_p (op1))
2685 return simplify_gen_binary (AND, mode,
2686 simplify_gen_unary (NOT, mode,
2687 XEXP (op0, 1), mode),
2688 op1);
2690 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2691 we can transform like this:
2692 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2693 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2694 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2695 Attempt a few simplifications when B and C are both constants. */
2696 if (GET_CODE (op0) == AND
2697 && CONST_INT_P (op1)
2698 && CONST_INT_P (XEXP (op0, 1)))
2700 rtx a = XEXP (op0, 0);
2701 rtx b = XEXP (op0, 1);
2702 rtx c = op1;
2703 HOST_WIDE_INT bval = INTVAL (b);
2704 HOST_WIDE_INT cval = INTVAL (c);
2706 rtx na_c
2707 = simplify_binary_operation (AND, mode,
2708 simplify_gen_unary (NOT, mode, a, mode),
2709 c);
2710 if ((~cval & bval) == 0)
2712 /* Try to simplify ~A&C | ~B&C. */
2713 if (na_c != NULL_RTX)
2714 return simplify_gen_binary (IOR, mode, na_c,
2715 gen_int_mode (~bval & cval, mode));
2717 else
2719 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2720 if (na_c == const0_rtx)
2722 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2723 gen_int_mode (~cval & bval,
2724 mode));
2725 return simplify_gen_binary (IOR, mode, a_nc_b,
2726 gen_int_mode (~bval & cval,
2727 mode));
2732 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2733 comparison if STORE_FLAG_VALUE is 1. */
2734 if (STORE_FLAG_VALUE == 1
2735 && trueop1 == const1_rtx
2736 && COMPARISON_P (op0)
2737 && (reversed = reversed_comparison (op0, mode)))
2738 return reversed;
2740 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2741 is (lt foo (const_int 0)), so we can perform the above
2742 simplification if STORE_FLAG_VALUE is 1. */
2744 if (STORE_FLAG_VALUE == 1
2745 && trueop1 == const1_rtx
2746 && GET_CODE (op0) == LSHIFTRT
2747 && CONST_INT_P (XEXP (op0, 1))
2748 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2749 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2751 /* (xor (comparison foo bar) (const_int sign-bit))
2752 when STORE_FLAG_VALUE is the sign bit. */
2753 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2754 && trueop1 == const_true_rtx
2755 && COMPARISON_P (op0)
2756 && (reversed = reversed_comparison (op0, mode)))
2757 return reversed;
2759 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2760 if (tem)
2761 return tem;
2763 tem = simplify_associative_operation (code, mode, op0, op1);
2764 if (tem)
2765 return tem;
2766 break;
2768 case AND:
2769 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2770 return trueop1;
2771 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2772 return op0;
2773 if (HWI_COMPUTABLE_MODE_P (mode))
2775 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2776 HOST_WIDE_INT nzop1;
2777 if (CONST_INT_P (trueop1))
2779 HOST_WIDE_INT val1 = INTVAL (trueop1);
2780 /* If we are turning off bits already known off in OP0, we need
2781 not do an AND. */
2782 if ((nzop0 & ~val1) == 0)
2783 return op0;
2785 nzop1 = nonzero_bits (trueop1, mode);
2786 /* If we are clearing all the nonzero bits, the result is zero. */
2787 if ((nzop1 & nzop0) == 0
2788 && !side_effects_p (op0) && !side_effects_p (op1))
2789 return CONST0_RTX (mode);
2791 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2792 && GET_MODE_CLASS (mode) != MODE_CC)
2793 return op0;
2794 /* A & (~A) -> 0 */
2795 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2796 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2797 && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return CONST0_RTX (mode);
2801 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2802 there are no nonzero bits of C outside of X's mode. */
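/* For example (modes chosen for illustration),
(and:SI (sign_extend:SI (reg:QI r)) (const_int 0x7f)) becomes
(zero_extend:SI (and:QI (reg:QI r) (const_int 0x7f))), since 0x7f has
no bits outside QImode's mask.  */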
2803 if ((GET_CODE (op0) == SIGN_EXTEND
2804 || GET_CODE (op0) == ZERO_EXTEND)
2805 && CONST_INT_P (trueop1)
2806 && HWI_COMPUTABLE_MODE_P (mode)
2807 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2808 & UINTVAL (trueop1)) == 0)
2810 machine_mode imode = GET_MODE (XEXP (op0, 0));
2811 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2812 gen_int_mode (INTVAL (trueop1),
2813 imode));
2814 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2817 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2818 we might be able to further simplify the AND with X and potentially
2819 remove the truncation altogether. */
2820 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2822 rtx x = XEXP (op0, 0);
2823 machine_mode xmode = GET_MODE (x);
2824 tem = simplify_gen_binary (AND, xmode, x,
2825 gen_int_mode (INTVAL (trueop1), xmode));
2826 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2829 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2830 if (GET_CODE (op0) == IOR
2831 && CONST_INT_P (trueop1)
2832 && CONST_INT_P (XEXP (op0, 1)))
2834 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2835 return simplify_gen_binary (IOR, mode,
2836 simplify_gen_binary (AND, mode,
2837 XEXP (op0, 0), op1),
2838 gen_int_mode (tmp, mode));
2841 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2842 insn (and may simplify more). */
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 0), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 1), mode),
2849 op1);
2851 if (GET_CODE (op0) == XOR
2852 && rtx_equal_p (XEXP (op0, 1), op1)
2853 && ! side_effects_p (op1))
2854 return simplify_gen_binary (AND, mode,
2855 simplify_gen_unary (NOT, mode,
2856 XEXP (op0, 0), mode),
2857 op1);
2859 /* Similarly for (~(A ^ B)) & A. */
2860 if (GET_CODE (op0) == NOT
2861 && GET_CODE (XEXP (op0, 0)) == XOR
2862 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2863 && ! side_effects_p (op1))
2864 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2866 if (GET_CODE (op0) == NOT
2867 && GET_CODE (XEXP (op0, 0)) == XOR
2868 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2869 && ! side_effects_p (op1))
2870 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2872 /* Convert (A | B) & A to A. */
2873 if (GET_CODE (op0) == IOR
2874 && (rtx_equal_p (XEXP (op0, 0), op1)
2875 || rtx_equal_p (XEXP (op0, 1), op1))
2876 && ! side_effects_p (XEXP (op0, 0))
2877 && ! side_effects_p (XEXP (op0, 1)))
2878 return op1;
2880 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2881 ((A & N) + B) & M -> (A + B) & M
2882 Similarly if (N & M) == 0,
2883 ((A | N) + B) & M -> (A + B) & M
2884 and for - instead of + and/or ^ instead of |.
2885 Also, if (N & M) == 0, then
2886 (A +- N) & M -> A & M. */
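/* For instance, with M = 15 and N = 255, ((A & 255) + B) & 15
becomes (A + B) & 15; and with N = 16, (A + 16) & 15 becomes
A & 15.  */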
2887 if (CONST_INT_P (trueop1)
2888 && HWI_COMPUTABLE_MODE_P (mode)
2889 && ~UINTVAL (trueop1)
2890 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2891 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2893 rtx pmop[2];
2894 int which;
2896 pmop[0] = XEXP (op0, 0);
2897 pmop[1] = XEXP (op0, 1);
2899 if (CONST_INT_P (pmop[1])
2900 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2901 return simplify_gen_binary (AND, mode, pmop[0], op1);
2903 for (which = 0; which < 2; which++)
2905 tem = pmop[which];
2906 switch (GET_CODE (tem))
2908 case AND:
2909 if (CONST_INT_P (XEXP (tem, 1))
2910 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2911 == UINTVAL (trueop1))
2912 pmop[which] = XEXP (tem, 0);
2913 break;
2914 case IOR:
2915 case XOR:
2916 if (CONST_INT_P (XEXP (tem, 1))
2917 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2918 pmop[which] = XEXP (tem, 0);
2919 break;
2920 default:
2921 break;
2925 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2927 tem = simplify_gen_binary (GET_CODE (op0), mode,
2928 pmop[0], pmop[1]);
2929 return simplify_gen_binary (code, mode, tem, op1);
2933 /* (and X (ior (not X) Y)) -> (and X Y) */
2934 if (GET_CODE (op1) == IOR
2935 && GET_CODE (XEXP (op1, 0)) == NOT
2936 && op0 == XEXP (XEXP (op1, 0), 0))
2937 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2939 /* (and (ior (not X) Y) X) -> (and X Y) */
2940 if (GET_CODE (op0) == IOR
2941 && GET_CODE (XEXP (op0, 0)) == NOT
2942 && op1 == XEXP (XEXP (op0, 0), 0))
2943 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2945 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2946 if (tem)
2947 return tem;
2949 tem = simplify_associative_operation (code, mode, op0, op1);
2950 if (tem)
2951 return tem;
2952 break;
2954 case UDIV:
2955 /* 0/x is 0 (or x&0 if x has side-effects). */
2956 if (trueop0 == CONST0_RTX (mode))
2958 if (side_effects_p (op1))
2959 return simplify_gen_binary (AND, mode, op1, trueop0);
2960 return trueop0;
2962 /* x/1 is x. */
2963 if (trueop1 == CONST1_RTX (mode))
2965 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2966 if (tem)
2967 return tem;
2969 /* Convert divide by power of two into shift. */
2970 if (CONST_INT_P (trueop1)
2971 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2972 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2973 break;
2975 case DIV:
2976 /* Handle floating point and integers separately. */
2977 if (SCALAR_FLOAT_MODE_P (mode))
2979 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2980 safe for modes with NaNs, since 0.0 / 0.0 will then be
2981 NaN rather than 0.0. Nor is it safe for modes with signed
2982 zeros, since dividing 0 by a negative number gives -0.0. */
2983 if (trueop0 == CONST0_RTX (mode)
2984 && !HONOR_NANS (mode)
2985 && !HONOR_SIGNED_ZEROS (mode)
2986 && ! side_effects_p (op1))
2987 return op0;
2988 /* x/1.0 is x. */
2989 if (trueop1 == CONST1_RTX (mode)
2990 && !HONOR_SNANS (mode))
2991 return op0;
2993 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2994 && trueop1 != CONST0_RTX (mode))
2996 REAL_VALUE_TYPE d;
2997 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2999 /* x/-1.0 is -x. */
3000 if (REAL_VALUES_EQUAL (d, dconstm1)
3001 && !HONOR_SNANS (mode))
3002 return simplify_gen_unary (NEG, mode, op0, mode);
3004 /* Change FP division by a constant into multiplication.
3005 Only do this with -freciprocal-math. */
3006 if (flag_reciprocal_math
3007 && !REAL_VALUES_EQUAL (d, dconst0))
3009 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3010 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3011 return simplify_gen_binary (MULT, mode, op0, tem);
3015 else if (SCALAR_INT_MODE_P (mode))
3017 /* 0/x is 0 (or x&0 if x has side-effects). */
3018 if (trueop0 == CONST0_RTX (mode)
3019 && !cfun->can_throw_non_call_exceptions)
3021 if (side_effects_p (op1))
3022 return simplify_gen_binary (AND, mode, op1, trueop0);
3023 return trueop0;
3025 /* x/1 is x. */
3026 if (trueop1 == CONST1_RTX (mode))
3028 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3029 if (tem)
3030 return tem;
3032 /* x/-1 is -x. */
3033 if (trueop1 == constm1_rtx)
3035 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3036 if (x)
3037 return simplify_gen_unary (NEG, mode, x, mode);
3040 break;
3042 case UMOD:
3043 /* 0%x is 0 (or x&0 if x has side-effects). */
3044 if (trueop0 == CONST0_RTX (mode))
3046 if (side_effects_p (op1))
3047 return simplify_gen_binary (AND, mode, op1, trueop0);
3048 return trueop0;
3050 /* x%1 is 0 (or x&0 if x has side-effects). */
3051 if (trueop1 == CONST1_RTX (mode))
3053 if (side_effects_p (op0))
3054 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3055 return CONST0_RTX (mode);
3057 /* Implement modulus by power of two as AND. */
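/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)).  */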
3058 if (CONST_INT_P (trueop1)
3059 && exact_log2 (UINTVAL (trueop1)) > 0)
3060 return simplify_gen_binary (AND, mode, op0,
3061 gen_int_mode (INTVAL (op1) - 1, mode));
3062 break;
3064 case MOD:
3065 /* 0%x is 0 (or x&0 if x has side-effects). */
3066 if (trueop0 == CONST0_RTX (mode))
3068 if (side_effects_p (op1))
3069 return simplify_gen_binary (AND, mode, op1, trueop0);
3070 return trueop0;
3072 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3073 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3075 if (side_effects_p (op0))
3076 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3077 return CONST0_RTX (mode);
3079 break;
3081 case ROTATERT:
3082 case ROTATE:
3083 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3084 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3085 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
3086 amount instead. */
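/* For instance, in SImode (chosen for illustration) a (rotate x
(const_int 24)) is canonicalized to (rotatert x (const_int 8)) when
both rotate directions are available.  */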
3087 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3088 if (CONST_INT_P (trueop1)
3089 && IN_RANGE (INTVAL (trueop1),
3090 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3091 GET_MODE_PRECISION (mode) - 1))
3092 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3093 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3094 - INTVAL (trueop1)));
3095 #endif
3096 /* FALLTHRU */
3097 case ASHIFTRT:
3098 if (trueop1 == CONST0_RTX (mode))
3099 return op0;
3100 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3101 return op0;
3102 /* Rotating ~0 always results in ~0. */
3103 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3104 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3105 && ! side_effects_p (op1))
3106 return op0;
3107 /* Given:
3108 scalar modes M1, M2
3109 scalar constants c1, c2
3110 size (M2) > size (M1)
3111 c1 == size (M2) - size (M1)
3112 optimize:
3113 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3114 <low_part>)
3115 (const_int <c2>))
3116 to:
3117 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3118 <low_part>). */
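/* A concrete instance (modes chosen for illustration): with
M2 = DImode, M1 = SImode, c1 = 32 and c2 = 3, the result is
(subreg:SI (ashiftrt:DI (reg:DI) (const_int 35)) <low_part>).  */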
3119 if (code == ASHIFTRT
3120 && !VECTOR_MODE_P (mode)
3121 && SUBREG_P (op0)
3122 && CONST_INT_P (op1)
3123 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3124 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3125 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3126 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3127 > GET_MODE_BITSIZE (mode))
3128 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3129 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3130 - GET_MODE_BITSIZE (mode)))
3131 && subreg_lowpart_p (op0))
3133 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3134 + INTVAL (op1));
3135 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3136 tmp = simplify_gen_binary (ASHIFTRT,
3137 GET_MODE (SUBREG_REG (op0)),
3138 XEXP (SUBREG_REG (op0), 0),
3139 tmp);
3140 return simplify_gen_subreg (mode, tmp, inner_mode,
3141 subreg_lowpart_offset (mode,
3142 inner_mode));
3144 canonicalize_shift:
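/* When SHIFT_COUNT_TRUNCATED holds, an out-of-range count is reduced
modulo the precision, e.g. a count of 33 in SImode becomes 1.  */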
3145 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3147 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3148 if (val != INTVAL (op1))
3149 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3151 break;
3153 case ASHIFT:
3154 case SS_ASHIFT:
3155 case US_ASHIFT:
3156 if (trueop1 == CONST0_RTX (mode))
3157 return op0;
3158 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3159 return op0;
3160 goto canonicalize_shift;
3162 case LSHIFTRT:
3163 if (trueop1 == CONST0_RTX (mode))
3164 return op0;
3165 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3166 return op0;
3167 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
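/* For illustration: if CLZ of zero is defined as 32 in SImode (the
precision), then (lshiftrt:SI (clz:SI x) (const_int 5)) becomes
(eq x (const_int 0)), since only a CLZ result of 32 has bit 5 set.  */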
3168 if (GET_CODE (op0) == CLZ
3169 && CONST_INT_P (trueop1)
3170 && STORE_FLAG_VALUE == 1
3171 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3173 machine_mode imode = GET_MODE (XEXP (op0, 0));
3174 unsigned HOST_WIDE_INT zero_val = 0;
3176 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3177 && zero_val == GET_MODE_PRECISION (imode)
3178 && INTVAL (trueop1) == exact_log2 (zero_val))
3179 return simplify_gen_relational (EQ, mode, imode,
3180 XEXP (op0, 0), const0_rtx);
3182 goto canonicalize_shift;
3184 case SMIN:
3185 if (width <= HOST_BITS_PER_WIDE_INT
3186 && mode_signbit_p (mode, trueop1)
3187 && ! side_effects_p (op0))
3188 return op1;
3189 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3190 return op0;
3191 tem = simplify_associative_operation (code, mode, op0, op1);
3192 if (tem)
3193 return tem;
3194 break;
3196 case SMAX:
3197 if (width <= HOST_BITS_PER_WIDE_INT
3198 && CONST_INT_P (trueop1)
3199 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3200 && ! side_effects_p (op0))
3201 return op1;
3202 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3203 return op0;
3204 tem = simplify_associative_operation (code, mode, op0, op1);
3205 if (tem)
3206 return tem;
3207 break;
3209 case UMIN:
3210 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3211 return op1;
3212 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3213 return op0;
3214 tem = simplify_associative_operation (code, mode, op0, op1);
3215 if (tem)
3216 return tem;
3217 break;
3219 case UMAX:
3220 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3221 return op1;
3222 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3223 return op0;
3224 tem = simplify_associative_operation (code, mode, op0, op1);
3225 if (tem)
3226 return tem;
3227 break;
3229 case SS_PLUS:
3230 case US_PLUS:
3231 case SS_MINUS:
3232 case US_MINUS:
3233 case SS_MULT:
3234 case US_MULT:
3235 case SS_DIV:
3236 case US_DIV:
3237 /* ??? There are simplifications that can be done. */
3238 return 0;
3240 case VEC_SELECT:
3241 if (!VECTOR_MODE_P (mode))
3243 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3244 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3245 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3246 gcc_assert (XVECLEN (trueop1, 0) == 1);
3247 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3249 if (GET_CODE (trueop0) == CONST_VECTOR)
3250 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3251 (trueop1, 0, 0)));
3253 /* Extract a scalar element from a nested VEC_SELECT expression
3254 (with optional nested VEC_CONCAT expression). Some targets
3255 (i386) extract a scalar element from a vector using a chain of
3256 nested VEC_SELECT expressions. When the input operand is a memory
3257 operand, this operation can be simplified to a simple scalar
3258 load from a suitably offset memory address. */
3259 if (GET_CODE (trueop0) == VEC_SELECT)
3261 rtx op0 = XEXP (trueop0, 0);
3262 rtx op1 = XEXP (trueop0, 1);
3264 machine_mode opmode = GET_MODE (op0);
3265 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3266 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3268 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3269 int elem;
3271 rtvec vec;
3272 rtx tmp_op, tmp;
3274 gcc_assert (GET_CODE (op1) == PARALLEL);
3275 gcc_assert (i < n_elts);
3277 /* Select the element pointed to by the nested selector. */
3278 elem = INTVAL (XVECEXP (op1, 0, i));
3280 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3281 if (GET_CODE (op0) == VEC_CONCAT)
3283 rtx op00 = XEXP (op0, 0);
3284 rtx op01 = XEXP (op0, 1);
3286 machine_mode mode00, mode01;
3287 int n_elts00, n_elts01;
3289 mode00 = GET_MODE (op00);
3290 mode01 = GET_MODE (op01);
3292 /* Find out number of elements of each operand. */
3293 if (VECTOR_MODE_P (mode00))
3295 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3296 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3298 else
3299 n_elts00 = 1;
3301 if (VECTOR_MODE_P (mode01))
3303 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3304 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3306 else
3307 n_elts01 = 1;
3309 gcc_assert (n_elts == n_elts00 + n_elts01);
3311 /* Select correct operand of VEC_CONCAT
3312 and adjust selector. */
3313 if (elem < n_elts01)
3314 tmp_op = op00;
3315 else
3317 tmp_op = op01;
3318 elem -= n_elts00;
3321 else
3322 tmp_op = op0;
3324 vec = rtvec_alloc (1);
3325 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3327 tmp = gen_rtx_fmt_ee (code, mode,
3328 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3329 return tmp;
3331 if (GET_CODE (trueop0) == VEC_DUPLICATE
3332 && GET_MODE (XEXP (trueop0, 0)) == mode)
3333 return XEXP (trueop0, 0);
3335 else
3337 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3338 gcc_assert (GET_MODE_INNER (mode)
3339 == GET_MODE_INNER (GET_MODE (trueop0)));
3340 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3342 if (GET_CODE (trueop0) == CONST_VECTOR)
3344 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3345 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3346 rtvec v = rtvec_alloc (n_elts);
3347 unsigned int i;
3349 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3350 for (i = 0; i < n_elts; i++)
3352 rtx x = XVECEXP (trueop1, 0, i);
3354 gcc_assert (CONST_INT_P (x));
3355 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3356 INTVAL (x));
3359 return gen_rtx_CONST_VECTOR (mode, v);
3362 /* Recognize the identity. */
3363 if (GET_MODE (trueop0) == mode)
3365 bool maybe_ident = true;
3366 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3368 rtx j = XVECEXP (trueop1, 0, i);
3369 if (!CONST_INT_P (j) || INTVAL (j) != i)
3371 maybe_ident = false;
3372 break;
3375 if (maybe_ident)
3376 return trueop0;
3379 /* If we build {a,b} then permute it, build the result directly. */
3380 if (XVECLEN (trueop1, 0) == 2
3381 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3382 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3383 && GET_CODE (trueop0) == VEC_CONCAT
3384 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3385 && GET_MODE (XEXP (trueop0, 0)) == mode
3386 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3387 && GET_MODE (XEXP (trueop0, 1)) == mode)
3389 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3390 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3391 rtx subop0, subop1;
3393 gcc_assert (i0 < 4 && i1 < 4);
3394 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3395 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3397 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3400 if (XVECLEN (trueop1, 0) == 2
3401 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3402 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3403 && GET_CODE (trueop0) == VEC_CONCAT
3404 && GET_MODE (trueop0) == mode)
3406 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3407 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3408 rtx subop0, subop1;
3410 gcc_assert (i0 < 2 && i1 < 2);
3411 subop0 = XEXP (trueop0, i0);
3412 subop1 = XEXP (trueop0, i1);
3414 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3417 /* If we select one half of a vec_concat, return that. */
3418 if (GET_CODE (trueop0) == VEC_CONCAT
3419 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3421 rtx subop0 = XEXP (trueop0, 0);
3422 rtx subop1 = XEXP (trueop0, 1);
3423 machine_mode mode0 = GET_MODE (subop0);
3424 machine_mode mode1 = GET_MODE (subop1);
3425 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3426 int l0 = GET_MODE_SIZE (mode0) / li;
3427 int l1 = GET_MODE_SIZE (mode1) / li;
3428 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3429 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3431 bool success = true;
3432 for (int i = 1; i < l0; ++i)
3434 rtx j = XVECEXP (trueop1, 0, i);
3435 if (!CONST_INT_P (j) || INTVAL (j) != i)
3437 success = false;
3438 break;
3441 if (success)
3442 return subop0;
3444 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3446 bool success = true;
3447 for (int i = 1; i < l1; ++i)
3449 rtx j = XVECEXP (trueop1, 0, i);
3450 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3452 success = false;
3453 break;
3456 if (success)
3457 return subop1;
3462 if (XVECLEN (trueop1, 0) == 1
3463 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3464 && GET_CODE (trueop0) == VEC_CONCAT)
3466 rtx vec = trueop0;
3467 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3469 /* Try to find the element in the VEC_CONCAT. */
3470 while (GET_MODE (vec) != mode
3471 && GET_CODE (vec) == VEC_CONCAT)
3473 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3474 if (offset < vec_size)
3475 vec = XEXP (vec, 0);
3476 else
3478 offset -= vec_size;
3479 vec = XEXP (vec, 1);
3481 vec = avoid_constant_pool_reference (vec);
3484 if (GET_MODE (vec) == mode)
3485 return vec;
3488 /* If we select elements in a vec_merge that all come from the same
3489 operand, select from that operand directly. */
3490 if (GET_CODE (op0) == VEC_MERGE)
3492 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3493 if (CONST_INT_P (trueop02))
3495 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3496 bool all_operand0 = true;
3497 bool all_operand1 = true;
3498 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3500 rtx j = XVECEXP (trueop1, 0, i);
3501 if (sel & (1 << UINTVAL (j)))
3502 all_operand1 = false;
3503 else
3504 all_operand0 = false;
3506 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3507 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3508 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3509 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3513 /* If we have two nested selects that are inverses of each
3514 other, replace them with the source operand. */
3515 if (GET_CODE (trueop0) == VEC_SELECT
3516 && GET_MODE (XEXP (trueop0, 0)) == mode)
3518 rtx op0_subop1 = XEXP (trueop0, 1);
3519 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3520 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3522 /* Apply the outer ordering vector to the inner one. (The inner
3523 ordering vector is expressly permitted to be of a different
3524 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3525 then the two VEC_SELECTs cancel. */
3526 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3528 rtx x = XVECEXP (trueop1, 0, i);
3529 if (!CONST_INT_P (x))
3530 return 0;
3531 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3532 if (!CONST_INT_P (y) || i != INTVAL (y))
3533 return 0;
3535 return XEXP (trueop0, 0);
3538 return 0;
3539 case VEC_CONCAT:
3541 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3542 ? GET_MODE (trueop0)
3543 : GET_MODE_INNER (mode));
3544 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3545 ? GET_MODE (trueop1)
3546 : GET_MODE_INNER (mode));
3548 gcc_assert (VECTOR_MODE_P (mode));
3549 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3550 == GET_MODE_SIZE (mode));
3552 if (VECTOR_MODE_P (op0_mode))
3553 gcc_assert (GET_MODE_INNER (mode)
3554 == GET_MODE_INNER (op0_mode));
3555 else
3556 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3558 if (VECTOR_MODE_P (op1_mode))
3559 gcc_assert (GET_MODE_INNER (mode)
3560 == GET_MODE_INNER (op1_mode));
3561 else
3562 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3564 if ((GET_CODE (trueop0) == CONST_VECTOR
3565 || CONST_SCALAR_INT_P (trueop0)
3566 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3567 && (GET_CODE (trueop1) == CONST_VECTOR
3568 || CONST_SCALAR_INT_P (trueop1)
3569 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3571 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3572 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3573 rtvec v = rtvec_alloc (n_elts);
3574 unsigned int i;
3575 unsigned in_n_elts = 1;
3577 if (VECTOR_MODE_P (op0_mode))
3578 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3579 for (i = 0; i < n_elts; i++)
3581 if (i < in_n_elts)
3583 if (!VECTOR_MODE_P (op0_mode))
3584 RTVEC_ELT (v, i) = trueop0;
3585 else
3586 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3588 else
3590 if (!VECTOR_MODE_P (op1_mode))
3591 RTVEC_ELT (v, i) = trueop1;
3592 else
3593 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3594 i - in_n_elts);
3598 return gen_rtx_CONST_VECTOR (mode, v);
3601 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3602 Restrict the transformation to avoid generating a VEC_SELECT with a
3603 mode unrelated to its operand. */
3604 if (GET_CODE (trueop0) == VEC_SELECT
3605 && GET_CODE (trueop1) == VEC_SELECT
3606 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3607 && GET_MODE (XEXP (trueop0, 0)) == mode)
3609 rtx par0 = XEXP (trueop0, 1);
3610 rtx par1 = XEXP (trueop1, 1);
3611 int len0 = XVECLEN (par0, 0);
3612 int len1 = XVECLEN (par1, 0);
3613 rtvec vec = rtvec_alloc (len0 + len1);
3614 for (int i = 0; i < len0; i++)
3615 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3616 for (int i = 0; i < len1; i++)
3617 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3618 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3619 gen_rtx_PARALLEL (VOIDmode, vec));
3622 return 0;
3624 default:
3625 gcc_unreachable ();
3628 return 0;
3631 rtx
3632 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3633 rtx op0, rtx op1)
3635 unsigned int width = GET_MODE_PRECISION (mode);
3637 if (VECTOR_MODE_P (mode)
3638 && code != VEC_CONCAT
3639 && GET_CODE (op0) == CONST_VECTOR
3640 && GET_CODE (op1) == CONST_VECTOR)
3642 unsigned n_elts = GET_MODE_NUNITS (mode);
3643 machine_mode op0mode = GET_MODE (op0);
3644 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3645 machine_mode op1mode = GET_MODE (op1);
3646 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3647 rtvec v = rtvec_alloc (n_elts);
3648 unsigned int i;
3650 gcc_assert (op0_n_elts == n_elts);
3651 gcc_assert (op1_n_elts == n_elts);
3652 for (i = 0; i < n_elts; i++)
3654 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3655 CONST_VECTOR_ELT (op0, i),
3656 CONST_VECTOR_ELT (op1, i));
3657 if (!x)
3658 return 0;
3659 RTVEC_ELT (v, i) = x;
3662 return gen_rtx_CONST_VECTOR (mode, v);
3665 if (VECTOR_MODE_P (mode)
3666 && code == VEC_CONCAT
3667 && (CONST_SCALAR_INT_P (op0)
3668 || GET_CODE (op0) == CONST_FIXED
3669 || CONST_DOUBLE_AS_FLOAT_P (op0))
3670 && (CONST_SCALAR_INT_P (op1)
3671 || CONST_DOUBLE_AS_FLOAT_P (op1)
3672 || GET_CODE (op1) == CONST_FIXED))
3674 unsigned n_elts = GET_MODE_NUNITS (mode);
3675 rtvec v = rtvec_alloc (n_elts);
3677 gcc_assert (n_elts >= 2);
3678 if (n_elts == 2)
3680 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3681 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3683 RTVEC_ELT (v, 0) = op0;
3684 RTVEC_ELT (v, 1) = op1;
3686 else
3688 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3689 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3690 unsigned i;
3692 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3693 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3694 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3696 for (i = 0; i < op0_n_elts; ++i)
3697 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3698 for (i = 0; i < op1_n_elts; ++i)
3699 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3702 return gen_rtx_CONST_VECTOR (mode, v);
3705 if (SCALAR_FLOAT_MODE_P (mode)
3706 && CONST_DOUBLE_AS_FLOAT_P (op0)
3707 && CONST_DOUBLE_AS_FLOAT_P (op1)
3708 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3710 if (code == AND
3711 || code == IOR
3712 || code == XOR)
3714 long tmp0[4];
3715 long tmp1[4];
3716 REAL_VALUE_TYPE r;
3717 int i;
3719 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3720 GET_MODE (op0));
3721 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3722 GET_MODE (op1));
3723 for (i = 0; i < 4; i++)
3725 switch (code)
3727 case AND:
3728 tmp0[i] &= tmp1[i];
3729 break;
3730 case IOR:
3731 tmp0[i] |= tmp1[i];
3732 break;
3733 case XOR:
3734 tmp0[i] ^= tmp1[i];
3735 break;
3736 default:
3737 gcc_unreachable ();
3740 real_from_target (&r, tmp0, mode);
3741 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3743 else
3745 REAL_VALUE_TYPE f0, f1, value, result;
3746 bool inexact;
3748 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3749 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3750 real_convert (&f0, mode, &f0);
3751 real_convert (&f1, mode, &f1);
3753 if (HONOR_SNANS (mode)
3754 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3755 return 0;
3757 if (code == DIV
3758 && REAL_VALUES_EQUAL (f1, dconst0)
3759 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3760 return 0;
3762 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3763 && flag_trapping_math
3764 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3766 int s0 = REAL_VALUE_NEGATIVE (f0);
3767 int s1 = REAL_VALUE_NEGATIVE (f1);
3769 switch (code)
3771 case PLUS:
3772 /* Inf + -Inf = NaN plus exception. */
3773 if (s0 != s1)
3774 return 0;
3775 break;
3776 case MINUS:
3777 /* Inf - Inf = NaN plus exception. */
3778 if (s0 == s1)
3779 return 0;
3780 break;
3781 case DIV:
3782 /* Inf / Inf = NaN plus exception. */
3783 return 0;
3784 default:
3785 break;
3789 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3790 && flag_trapping_math
3791 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3792 || (REAL_VALUE_ISINF (f1)
3793 && REAL_VALUES_EQUAL (f0, dconst0))))
3794 /* Inf * 0 = NaN plus exception. */
3795 return 0;
3797 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3798 &f0, &f1);
3799 real_convert (&result, mode, &value);
3801 /* Don't constant fold this floating point operation if
3802 the result has overflowed and flag_trapping_math is set. */
3804 if (flag_trapping_math
3805 && MODE_HAS_INFINITIES (mode)
3806 && REAL_VALUE_ISINF (result)
3807 && !REAL_VALUE_ISINF (f0)
3808 && !REAL_VALUE_ISINF (f1))
3809 /* Overflow plus exception. */
3810 return 0;
3812 /* Don't constant fold this floating point operation if the
3813 result may depend upon the run-time rounding mode and
3814 flag_rounding_math is set, or if GCC's software emulation
3815 is unable to accurately represent the result. */
3817 if ((flag_rounding_math
3818 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3819 && (inexact || !real_identical (&result, &value)))
3820 return NULL_RTX;
3822 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3826 /* We can fold some multi-word operations. */
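/* For example, (plus:DI (const_int -1) (const_int 1)) folds to
   (const_int 0), and (ashift:SI (const_int 1) (const_int 33)) is
   left alone (NULL_RTX) unless SHIFT_COUNT_TRUNCATED reduces the
   out-of-range count modulo the precision. */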
3827 if ((GET_MODE_CLASS (mode) == MODE_INT
3828 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3829 && CONST_SCALAR_INT_P (op0)
3830 && CONST_SCALAR_INT_P (op1))
3832 wide_int result;
3833 bool overflow;
3834 rtx_mode_t pop0 = std::make_pair (op0, mode);
3835 rtx_mode_t pop1 = std::make_pair (op1, mode);
3837 #if TARGET_SUPPORTS_WIDE_INT == 0
3838 /* This assert keeps the simplification from producing a result
3839 that cannot be represented in a CONST_DOUBLE, but a lot of
3840 upstream callers expect that this function never fails to
3841 simplify something, so if you added this check to the test
3842 above, the code would die later anyway. If this assert
3843 fires, you just need to make the port support wide int. */
3844 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3845 #endif
3846 switch (code)
3848 case MINUS:
3849 result = wi::sub (pop0, pop1);
3850 break;
3852 case PLUS:
3853 result = wi::add (pop0, pop1);
3854 break;
3856 case MULT:
3857 result = wi::mul (pop0, pop1);
3858 break;
3860 case DIV:
3861 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3862 if (overflow)
3863 return NULL_RTX;
3864 break;
3866 case MOD:
3867 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3868 if (overflow)
3869 return NULL_RTX;
3870 break;
3872 case UDIV:
3873 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3874 if (overflow)
3875 return NULL_RTX;
3876 break;
3878 case UMOD:
3879 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3880 if (overflow)
3881 return NULL_RTX;
3882 break;
3884 case AND:
3885 result = wi::bit_and (pop0, pop1);
3886 break;
3888 case IOR:
3889 result = wi::bit_or (pop0, pop1);
3890 break;
3892 case XOR:
3893 result = wi::bit_xor (pop0, pop1);
3894 break;
3896 case SMIN:
3897 result = wi::smin (pop0, pop1);
3898 break;
3900 case SMAX:
3901 result = wi::smax (pop0, pop1);
3902 break;
3904 case UMIN:
3905 result = wi::umin (pop0, pop1);
3906 break;
3908 case UMAX:
3909 result = wi::umax (pop0, pop1);
3910 break;
3912 case LSHIFTRT:
3913 case ASHIFTRT:
3914 case ASHIFT:
3916 wide_int wop1 = pop1;
3917 if (SHIFT_COUNT_TRUNCATED)
3918 wop1 = wi::umod_trunc (wop1, width);
3919 else if (wi::geu_p (wop1, width))
3920 return NULL_RTX;
3922 switch (code)
3924 case LSHIFTRT:
3925 result = wi::lrshift (pop0, wop1);
3926 break;
3928 case ASHIFTRT:
3929 result = wi::arshift (pop0, wop1);
3930 break;
3932 case ASHIFT:
3933 result = wi::lshift (pop0, wop1);
3934 break;
3936 default:
3937 gcc_unreachable ();
3939 break;
3941 case ROTATE:
3942 case ROTATERT:
3944 if (wi::neg_p (pop1))
3945 return NULL_RTX;
3947 switch (code)
3949 case ROTATE:
3950 result = wi::lrotate (pop0, pop1);
3951 break;
3953 case ROTATERT:
3954 result = wi::rrotate (pop0, pop1);
3955 break;
3957 default:
3958 gcc_unreachable ();
3960 break;
3962 default:
3963 return NULL_RTX;
3965 return immed_wide_int_const (result, mode);
3968 return NULL_RTX;
3973 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3974 PLUS or MINUS.
3976 Rather than test for specific cases, we do this by a brute-force method
3977 and do all possible simplifications until no more changes occur. Then
3978 we rebuild the operation. */
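/* For example, (minus (plus a b) (plus a c)) is expanded into the
   operand list {a, b, -a, -c}; the a/-a pair cancels during the
   pairwise simplification loop and the remaining operands are
   rebuilt as (minus b c). */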
3980 struct simplify_plus_minus_op_data
3982 rtx op;
3983 short neg;
3986 static bool
3987 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3989 int result;
3991 result = (commutative_operand_precedence (y)
3992 - commutative_operand_precedence (x));
3993 if (result)
3994 return result > 0;
3996 /* Group together equal REGs to do more simplification. */
3997 if (REG_P (x) && REG_P (y))
3998 return REGNO (x) > REGNO (y);
3999 else
4000 return false;
4003 static rtx
4004 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4005 rtx op1)
4007 struct simplify_plus_minus_op_data ops[16];
4008 rtx result, tem;
4009 int n_ops = 2;
4010 int changed, n_constants, canonicalized = 0;
4011 int i, j;
4013 memset (ops, 0, sizeof ops);
4015 /* Set up the two operands and then expand them until nothing has been
4016 changed. If we run out of room in our array, give up; this should
4017 almost never happen. */
4019 ops[0].op = op0;
4020 ops[0].neg = 0;
4021 ops[1].op = op1;
4022 ops[1].neg = (code == MINUS);
4026 changed = 0;
4027 n_constants = 0;
4029 for (i = 0; i < n_ops; i++)
4031 rtx this_op = ops[i].op;
4032 int this_neg = ops[i].neg;
4033 enum rtx_code this_code = GET_CODE (this_op);
4035 switch (this_code)
4037 case PLUS:
4038 case MINUS:
4039 if (n_ops == ARRAY_SIZE (ops))
4040 return NULL_RTX;
4042 ops[n_ops].op = XEXP (this_op, 1);
4043 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4044 n_ops++;
4046 ops[i].op = XEXP (this_op, 0);
4047 changed = 1;
4048 canonicalized |= this_neg || i != n_ops - 2;
4049 break;
4051 case NEG:
4052 ops[i].op = XEXP (this_op, 0);
4053 ops[i].neg = ! this_neg;
4054 changed = 1;
4055 canonicalized = 1;
4056 break;
4058 case CONST:
4059 if (n_ops != ARRAY_SIZE (ops)
4060 && GET_CODE (XEXP (this_op, 0)) == PLUS
4061 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4062 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4064 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4065 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4066 ops[n_ops].neg = this_neg;
4067 n_ops++;
4068 changed = 1;
4069 canonicalized = 1;
4071 break;
4073 case NOT:
4074 /* ~a -> (-a - 1) */
4075 if (n_ops != ARRAY_SIZE (ops))
4077 ops[n_ops].op = CONSTM1_RTX (mode);
4078 ops[n_ops++].neg = this_neg;
4079 ops[i].op = XEXP (this_op, 0);
4080 ops[i].neg = !this_neg;
4081 changed = 1;
4082 canonicalized = 1;
4084 break;
4086 case CONST_INT:
4087 n_constants++;
4088 if (this_neg)
4090 ops[i].op = neg_const_int (mode, this_op);
4091 ops[i].neg = 0;
4092 changed = 1;
4093 canonicalized = 1;
4095 break;
4097 default:
4098 break;
4102 while (changed);
4104 if (n_constants > 1)
4105 canonicalized = 1;
4107 gcc_assert (n_ops >= 2);
4109 /* If we only have two operands, we can avoid the loops. */
4110 if (n_ops == 2)
4112 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4113 rtx lhs, rhs;
4115 /* Get the two operands. Be careful with the order, especially for
4116 the cases where code == MINUS. */
4117 if (ops[0].neg && ops[1].neg)
4119 lhs = gen_rtx_NEG (mode, ops[0].op);
4120 rhs = ops[1].op;
4122 else if (ops[0].neg)
4124 lhs = ops[1].op;
4125 rhs = ops[0].op;
4127 else
4129 lhs = ops[0].op;
4130 rhs = ops[1].op;
4133 return simplify_const_binary_operation (code, mode, lhs, rhs);
4136 /* Now simplify each pair of operands until nothing changes. */
4139 /* Insertion sort is good enough for a small array. */
4140 for (i = 1; i < n_ops; i++)
4142 struct simplify_plus_minus_op_data save;
4143 j = i - 1;
4144 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4145 continue;
4147 canonicalized = 1;
4148 save = ops[i];
4150 ops[j + 1] = ops[j];
4151 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4152 ops[j + 1] = save;
4155 changed = 0;
4156 for (i = n_ops - 1; i > 0; i--)
4157 for (j = i - 1; j >= 0; j--)
4159 rtx lhs = ops[j].op, rhs = ops[i].op;
4160 int lneg = ops[j].neg, rneg = ops[i].neg;
4162 if (lhs != 0 && rhs != 0)
4164 enum rtx_code ncode = PLUS;
4166 if (lneg != rneg)
4168 ncode = MINUS;
4169 if (lneg)
4170 tem = lhs, lhs = rhs, rhs = tem;
4172 else if (swap_commutative_operands_p (lhs, rhs))
4173 tem = lhs, lhs = rhs, rhs = tem;
4175 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4176 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4178 rtx tem_lhs, tem_rhs;
4180 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4181 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4182 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4184 if (tem && !CONSTANT_P (tem))
4185 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4187 else
4188 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4190 if (tem)
4192 /* Reject "simplifications" that just wrap the two
4193 arguments in a CONST. Failure to do so can result
4194 in infinite recursion with simplify_binary_operation
4195 when it calls us to simplify CONST operations.
4196 Also, if we find such a simplification, don't try
4197 any more combinations with this rhs: We must have
4198 something like symbol+offset, i.e. one of the
4199 trivial CONST expressions we handle later. */
4200 if (GET_CODE (tem) == CONST
4201 && GET_CODE (XEXP (tem, 0)) == ncode
4202 && XEXP (XEXP (tem, 0), 0) == lhs
4203 && XEXP (XEXP (tem, 0), 1) == rhs)
4204 break;
4205 lneg &= rneg;
4206 if (GET_CODE (tem) == NEG)
4207 tem = XEXP (tem, 0), lneg = !lneg;
4208 if (CONST_INT_P (tem) && lneg)
4209 tem = neg_const_int (mode, tem), lneg = 0;
4211 ops[i].op = tem;
4212 ops[i].neg = lneg;
4213 ops[j].op = NULL_RTX;
4214 changed = 1;
4215 canonicalized = 1;
4220 /* If nothing changed, fail. */
4221 if (!canonicalized)
4222 return NULL_RTX;
4224 /* Pack all the operands to the lower-numbered entries. */
4225 for (i = 0, j = 0; j < n_ops; j++)
4226 if (ops[j].op)
4228 ops[i] = ops[j];
4229 i++;
4231 n_ops = i;
4233 while (changed);
4235 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4236 if (n_ops == 2
4237 && CONST_INT_P (ops[1].op)
4238 && CONSTANT_P (ops[0].op)
4239 && ops[0].neg)
4240 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4242 /* We suppressed creation of trivial CONST expressions in the
4243 combination loop to avoid recursion. Create one manually now.
4244 The combination loop should have ensured that there is exactly
4245 one CONST_INT, and the sort will have ensured that it is last
4246 in the array and that any other constant will be next-to-last. */
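/* For example, if the remaining operands are (symbol_ref "x") and
   (const_int 12), plus_constant combines them here into the
   canonical (const (plus (symbol_ref "x") (const_int 12))). */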
4248 if (n_ops > 1
4249 && CONST_INT_P (ops[n_ops - 1].op)
4250 && CONSTANT_P (ops[n_ops - 2].op))
4252 rtx value = ops[n_ops - 1].op;
4253 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4254 value = neg_const_int (mode, value);
4255 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4256 INTVAL (value));
4257 n_ops--;
4260 /* Put a non-negated operand first, if possible. */
4262 for (i = 0; i < n_ops && ops[i].neg; i++)
4263 continue;
4264 if (i == n_ops)
4265 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4266 else if (i != 0)
4268 tem = ops[0].op;
4269 ops[0] = ops[i];
4270 ops[i].op = tem;
4271 ops[i].neg = 1;
4274 /* Now make the result by performing the requested operations. */
4275 result = ops[0].op;
4276 for (i = 1; i < n_ops; i++)
4277 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4278 mode, result, ops[i].op);
4280 return result;
4283 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4284 static bool
4285 plus_minus_operand_p (const_rtx x)
4287 return GET_CODE (x) == PLUS
4288 || GET_CODE (x) == MINUS
4289 || (GET_CODE (x) == CONST
4290 && GET_CODE (XEXP (x, 0)) == PLUS
4291 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4292 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4295 /* Like simplify_binary_operation except used for relational operators.
4296 MODE is the mode of the result. If MODE is VOIDmode, the operands
4297 must not both be VOIDmode as well.
4299 CMP_MODE specifies the mode in which the comparison is done, so it is
4300 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4301 the operands or, if both are VOIDmode, the operands are compared in
4302 "infinite precision". */
4304 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4305 machine_mode cmp_mode, rtx op0, rtx op1)
4307 rtx tem, trueop0, trueop1;
4309 if (cmp_mode == VOIDmode)
4310 cmp_mode = GET_MODE (op0);
4311 if (cmp_mode == VOIDmode)
4312 cmp_mode = GET_MODE (op1);
4314 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4315 if (tem)
4317 if (SCALAR_FLOAT_MODE_P (mode))
4319 if (tem == const0_rtx)
4320 return CONST0_RTX (mode);
4321 #ifdef FLOAT_STORE_FLAG_VALUE
4323 REAL_VALUE_TYPE val;
4324 val = FLOAT_STORE_FLAG_VALUE (mode);
4325 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4327 #else
4328 return NULL_RTX;
4329 #endif
4331 if (VECTOR_MODE_P (mode))
4333 if (tem == const0_rtx)
4334 return CONST0_RTX (mode);
4335 #ifdef VECTOR_STORE_FLAG_VALUE
4337 int i, units;
4338 rtvec v;
4340 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4341 if (val == NULL_RTX)
4342 return NULL_RTX;
4343 if (val == const1_rtx)
4344 return CONST1_RTX (mode);
4346 units = GET_MODE_NUNITS (mode);
4347 v = rtvec_alloc (units);
4348 for (i = 0; i < units; i++)
4349 RTVEC_ELT (v, i) = val;
4350 return gen_rtx_raw_CONST_VECTOR (mode, v);
4352 #else
4353 return NULL_RTX;
4354 #endif
4357 return tem;
4360 /* For the following tests, ensure const0_rtx is op1. */
4361 if (swap_commutative_operands_p (op0, op1)
4362 || (op0 == const0_rtx && op1 != const0_rtx))
4363 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4365 /* If op0 is a compare, extract the comparison arguments from it. */
4366 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4367 return simplify_gen_relational (code, mode, VOIDmode,
4368 XEXP (op0, 0), XEXP (op0, 1));
4370 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4371 || CC0_P (op0))
4372 return NULL_RTX;
4374 trueop0 = avoid_constant_pool_reference (op0);
4375 trueop1 = avoid_constant_pool_reference (op1);
4376 return simplify_relational_operation_1 (code, mode, cmp_mode,
4377 trueop0, trueop1);
4380 /* This part of simplify_relational_operation is only used when CMP_MODE
4381 is not in class MODE_CC (i.e. it is a real comparison).
4383 MODE is the mode of the result, while CMP_MODE specifies the mode
4384 in which the comparison is done, so it is the mode of the operands. */
4386 static rtx
4387 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4388 machine_mode cmp_mode, rtx op0, rtx op1)
4390 enum rtx_code op0code = GET_CODE (op0);
4392 if (op1 == const0_rtx && COMPARISON_P (op0))
4394 /* If op0 is a comparison, extract the comparison arguments
4395 from it. */
4396 if (code == NE)
4398 if (GET_MODE (op0) == mode)
4399 return simplify_rtx (op0);
4400 else
4401 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4402 XEXP (op0, 0), XEXP (op0, 1));
4404 else if (code == EQ)
4406 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4407 if (new_code != UNKNOWN)
4408 return simplify_gen_relational (new_code, mode, VOIDmode,
4409 XEXP (op0, 0), XEXP (op0, 1));
4413 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4414 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
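/* E.g. the overflow test (ltu (plus a (const_int 4)) (const_int 4))
   becomes (geu a (const_int -4)): a + 4 wraps around exactly when
   a is unsigned-greater-or-equal to -4. */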
4415 if ((code == LTU || code == GEU)
4416 && GET_CODE (op0) == PLUS
4417 && CONST_INT_P (XEXP (op0, 1))
4418 && (rtx_equal_p (op1, XEXP (op0, 0))
4419 || rtx_equal_p (op1, XEXP (op0, 1)))
4420 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4421 && XEXP (op0, 1) != const0_rtx)
4423 rtx new_cmp
4424 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4425 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4426 cmp_mode, XEXP (op0, 0), new_cmp);
4429 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4430 if ((code == LTU || code == GEU)
4431 && GET_CODE (op0) == PLUS
4432 && rtx_equal_p (op1, XEXP (op0, 1))
4433 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4434 && !rtx_equal_p (op1, XEXP (op0, 0)))
4435 return simplify_gen_relational (code, mode, cmp_mode, op0,
4436 copy_rtx (XEXP (op0, 0)));
4438 if (op1 == const0_rtx)
4440 /* Canonicalize (GTU x 0) as (NE x 0). */
4441 if (code == GTU)
4442 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4443 /* Canonicalize (LEU x 0) as (EQ x 0). */
4444 if (code == LEU)
4445 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4447 else if (op1 == const1_rtx)
4449 switch (code)
4451 case GE:
4452 /* Canonicalize (GE x 1) as (GT x 0). */
4453 return simplify_gen_relational (GT, mode, cmp_mode,
4454 op0, const0_rtx);
4455 case GEU:
4456 /* Canonicalize (GEU x 1) as (NE x 0). */
4457 return simplify_gen_relational (NE, mode, cmp_mode,
4458 op0, const0_rtx);
4459 case LT:
4460 /* Canonicalize (LT x 1) as (LE x 0). */
4461 return simplify_gen_relational (LE, mode, cmp_mode,
4462 op0, const0_rtx);
4463 case LTU:
4464 /* Canonicalize (LTU x 1) as (EQ x 0). */
4465 return simplify_gen_relational (EQ, mode, cmp_mode,
4466 op0, const0_rtx);
4467 default:
4468 break;
4471 else if (op1 == constm1_rtx)
4473 /* Canonicalize (LE x -1) as (LT x 0). */
4474 if (code == LE)
4475 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4476 /* Canonicalize (GT x -1) as (GE x 0). */
4477 if (code == GT)
4478 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4481 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4482 if ((code == EQ || code == NE)
4483 && (op0code == PLUS || op0code == MINUS)
4484 && CONSTANT_P (op1)
4485 && CONSTANT_P (XEXP (op0, 1))
4486 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4488 rtx x = XEXP (op0, 0);
4489 rtx c = XEXP (op0, 1);
4490 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4491 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4493 /* Detect an infinite recursive condition, where we oscillate at this
4494 simplification case between:
4495 A + B == C <---> C - B == A,
4496 where A, B, and C are all constants with non-simplifiable expressions,
4497 usually SYMBOL_REFs. */
4498 if (GET_CODE (tem) == invcode
4499 && CONSTANT_P (x)
4500 && rtx_equal_p (c, XEXP (tem, 1)))
4501 return NULL_RTX;
4503 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4506 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4507 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4508 if (code == NE
4509 && op1 == const0_rtx
4510 && GET_MODE_CLASS (mode) == MODE_INT
4511 && cmp_mode != VOIDmode
4512 /* ??? Work-around BImode bugs in the ia64 backend. */
4513 && mode != BImode
4514 && cmp_mode != BImode
4515 && nonzero_bits (op0, cmp_mode) == 1
4516 && STORE_FLAG_VALUE == 1)
4517 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4518 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4519 : lowpart_subreg (mode, op0, cmp_mode);
4521 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4522 if ((code == EQ || code == NE)
4523 && op1 == const0_rtx
4524 && op0code == XOR)
4525 return simplify_gen_relational (code, mode, cmp_mode,
4526 XEXP (op0, 0), XEXP (op0, 1));
4528 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4529 if ((code == EQ || code == NE)
4530 && op0code == XOR
4531 && rtx_equal_p (XEXP (op0, 0), op1)
4532 && !side_effects_p (XEXP (op0, 0)))
4533 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4534 CONST0_RTX (mode));
4536 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4537 if ((code == EQ || code == NE)
4538 && op0code == XOR
4539 && rtx_equal_p (XEXP (op0, 1), op1)
4540 && !side_effects_p (XEXP (op0, 1)))
4541 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4542 CONST0_RTX (mode));
4544 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4545 if ((code == EQ || code == NE)
4546 && op0code == XOR
4547 && CONST_SCALAR_INT_P (op1)
4548 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4549 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4550 simplify_gen_binary (XOR, cmp_mode,
4551 XEXP (op0, 1), op1));
4553 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4554 can be implemented with a BICS instruction on some targets, or
4555 constant-folded if y is a constant. */
4556 if ((code == EQ || code == NE)
4557 && op0code == AND
4558 && rtx_equal_p (XEXP (op0, 0), op1)
4559 && !side_effects_p (op1))
4561 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4562 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4564 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4565 CONST0_RTX (cmp_mode));
4568 /* Likewise for (eq/ne (and x y) y). */
4569 if ((code == EQ || code == NE)
4570 && op0code == AND
4571 && rtx_equal_p (XEXP (op0, 1), op1)
4572 && !side_effects_p (op1))
4574 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4575 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4577 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4578 CONST0_RTX (cmp_mode));
4581 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4582 if ((code == EQ || code == NE)
4583 && GET_CODE (op0) == BSWAP
4584 && CONST_SCALAR_INT_P (op1))
4585 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4586 simplify_gen_unary (BSWAP, cmp_mode,
4587 op1, cmp_mode));
4589 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4590 if ((code == EQ || code == NE)
4591 && GET_CODE (op0) == BSWAP
4592 && GET_CODE (op1) == BSWAP)
4593 return simplify_gen_relational (code, mode, cmp_mode,
4594 XEXP (op0, 0), XEXP (op1, 0));
4596 if (op0code == POPCOUNT && op1 == const0_rtx)
4597 switch (code)
4599 case EQ:
4600 case LE:
4601 case LEU:
4602 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4603 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4604 XEXP (op0, 0), const0_rtx);
4606 case NE:
4607 case GT:
4608 case GTU:
4609 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4610 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4611 XEXP (op0, 0), const0_rtx);
4613 default:
4614 break;
4617 return NULL_RTX;
4620 enum
4622 CMP_EQ = 1,
4623 CMP_LT = 2,
4624 CMP_GT = 4,
4625 CMP_LTU = 8,
4626 CMP_GTU = 16
4630 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4631 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4632 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4633 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4634 For floating-point comparisons, assume that the operands were ordered. */
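/* For example, KNOWN_RESULT == (CMP_LT | CMP_LTU) means op0 is known
   to be smaller than op1 both as a signed and as an unsigned value,
   so LT, LE, LTU and LEU yield const_true_rtx while EQ, GT and GTU
   yield const0_rtx. */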
4636 static rtx
4637 comparison_result (enum rtx_code code, int known_results)
4639 switch (code)
4641 case EQ:
4642 case UNEQ:
4643 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4644 case NE:
4645 case LTGT:
4646 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4648 case LT:
4649 case UNLT:
4650 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4651 case GE:
4652 case UNGE:
4653 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4655 case GT:
4656 case UNGT:
4657 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4658 case LE:
4659 case UNLE:
4660 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4662 case LTU:
4663 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4664 case GEU:
4665 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4667 case GTU:
4668 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4669 case LEU:
4670 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4672 case ORDERED:
4673 return const_true_rtx;
4674 case UNORDERED:
4675 return const0_rtx;
4676 default:
4677 gcc_unreachable ();
4681 /* Check if the given comparison (done in the given MODE) is actually
4682 a tautology or a contradiction. If the mode is VOIDmode, the
4683 comparison is done in "infinite precision". If no simplification
4684 is possible, this function returns zero. Otherwise, it returns
4685 either const_true_rtx or const0_rtx. */
4688 simplify_const_relational_operation (enum rtx_code code,
4689 machine_mode mode,
4690 rtx op0, rtx op1)
4692 rtx tem;
4693 rtx trueop0;
4694 rtx trueop1;
4696 gcc_assert (mode != VOIDmode
4697 || (GET_MODE (op0) == VOIDmode
4698 && GET_MODE (op1) == VOIDmode));
4700 /* If op0 is a compare, extract the comparison arguments from it. */
4701 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4703 op1 = XEXP (op0, 1);
4704 op0 = XEXP (op0, 0);
4706 if (GET_MODE (op0) != VOIDmode)
4707 mode = GET_MODE (op0);
4708 else if (GET_MODE (op1) != VOIDmode)
4709 mode = GET_MODE (op1);
4710 else
4711 return 0;
4714 /* We can't simplify MODE_CC values since we don't know what the
4715 actual comparison is. */
4716 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4717 return 0;
4719 /* Make sure the constant is second. */
4720 if (swap_commutative_operands_p (op0, op1))
4722 tem = op0, op0 = op1, op1 = tem;
4723 code = swap_condition (code);
4726 trueop0 = avoid_constant_pool_reference (op0);
4727 trueop1 = avoid_constant_pool_reference (op1);
4729 /* For integer comparisons of A and B, we may be able to simplify A - B and
4730 then simplify a comparison of that with zero. If A and B are both either
4731 a register or a CONST_INT, this can't help; testing for these cases will
4732 prevent infinite recursion here and speed things up.
4734 We can only do this for EQ and NE comparisons as otherwise we may
4735 lose or introduce overflow which we cannot disregard as undefined as
4736 we do not know the signedness of the operation on either the left or
4737 the right hand side of the comparison. */
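/* For example, comparing (plus r (const_int 4)) against
   (plus r (const_int 8)) with EQ: the difference simplifies to
   (const_int -4), and the recursive comparison against zero then
   folds the whole test to const0_rtx. */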
4739 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4740 && (code == EQ || code == NE)
4741 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4742 && (REG_P (op1) || CONST_INT_P (trueop1)))
4743 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4744 /* We cannot do this if tem is a nonzero address. */
4745 && ! nonzero_address_p (tem))
4746 return simplify_const_relational_operation (signed_condition (code),
4747 mode, tem, const0_rtx);
4749 if (! HONOR_NANS (mode) && code == ORDERED)
4750 return const_true_rtx;
4752 if (! HONOR_NANS (mode) && code == UNORDERED)
4753 return const0_rtx;
4755 /* For modes without NaNs, if the two operands are equal, we know the
4756 result except if they have side-effects. Even with NaNs we know
4757 the result of unordered comparisons and, if signaling NaNs are
4758 irrelevant, also the result of LT/GT/LTGT. */
4759 if ((! HONOR_NANS (trueop0)
4760 || code == UNEQ || code == UNLE || code == UNGE
4761 || ((code == LT || code == GT || code == LTGT)
4762 && ! HONOR_SNANS (trueop0)))
4763 && rtx_equal_p (trueop0, trueop1)
4764 && ! side_effects_p (trueop0))
4765 return comparison_result (code, CMP_EQ);
4767 /* If the operands are floating-point constants, see if we can fold
4768 the result. */
4769 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4770 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4771 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4773 REAL_VALUE_TYPE d0, d1;
4775 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4776 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4778 /* Comparisons are unordered iff at least one of the values is NaN. */
4779 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4780 switch (code)
4782 case UNEQ:
4783 case UNLT:
4784 case UNGT:
4785 case UNLE:
4786 case UNGE:
4787 case NE:
4788 case UNORDERED:
4789 return const_true_rtx;
4790 case EQ:
4791 case LT:
4792 case GT:
4793 case LE:
4794 case GE:
4795 case LTGT:
4796 case ORDERED:
4797 return const0_rtx;
4798 default:
4799 return 0;
4802 return comparison_result (code,
4803 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4804 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4807 /* Otherwise, see if the operands are both integers. */
4808 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4809 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4811 /* It would be nice if we really had a mode here. However, the
4812 largest int representable on the target is as good as
4813 infinite. */
4814 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4815 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4816 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4818 if (wi::eq_p (ptrueop0, ptrueop1))
4819 return comparison_result (code, CMP_EQ);
4820 else
4822 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4823 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4824 return comparison_result (code, cr);
4828 /* Optimize comparisons with upper and lower bounds. */
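/* For example, if op0 is (and x (const_int 15)), nonzero_bits bounds
   it to [0, 15], so (gtu (and x (const_int 15)) (const_int 15))
   folds to const0_rtx and the corresponding LEU test folds to
   const_true_rtx. */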
4829 if (HWI_COMPUTABLE_MODE_P (mode)
4830 && CONST_INT_P (trueop1))
4832 int sign;
4833 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4834 HOST_WIDE_INT val = INTVAL (trueop1);
4835 HOST_WIDE_INT mmin, mmax;
4837 if (code == GEU
4838 || code == LEU
4839 || code == GTU
4840 || code == LTU)
4841 sign = 0;
4842 else
4843 sign = 1;
4845 /* Get a reduced range if the sign bit is zero. */
4846 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4848 mmin = 0;
4849 mmax = nonzero;
4851 else
4853 rtx mmin_rtx, mmax_rtx;
4854 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4856 mmin = INTVAL (mmin_rtx);
4857 mmax = INTVAL (mmax_rtx);
4858 if (sign)
4860 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4862 mmin >>= (sign_copies - 1);
4863 mmax >>= (sign_copies - 1);
4867 switch (code)
4869 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4870 case GEU:
4871 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4872 return const_true_rtx;
4873 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4874 return const0_rtx;
4875 break;
4876 case GE:
4877 if (val <= mmin)
4878 return const_true_rtx;
4879 if (val > mmax)
4880 return const0_rtx;
4881 break;
4883 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4884 case LEU:
4885 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4886 return const_true_rtx;
4887 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4888 return const0_rtx;
4889 break;
4890 case LE:
4891 if (val >= mmax)
4892 return const_true_rtx;
4893 if (val < mmin)
4894 return const0_rtx;
4895 break;
4897 case EQ:
4898 /* x == y is always false for y out of range. */
4899 if (val < mmin || val > mmax)
4900 return const0_rtx;
4901 break;
4903 /* x > y is always false for y >= mmax, always true for y < mmin. */
4904 case GTU:
4905 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4906 return const0_rtx;
4907 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4908 return const_true_rtx;
4909 break;
4910 case GT:
4911 if (val >= mmax)
4912 return const0_rtx;
4913 if (val < mmin)
4914 return const_true_rtx;
4915 break;
4917 /* x < y is always false for y <= mmin, always true for y > mmax. */
4918 case LTU:
4919 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4920 return const0_rtx;
4921 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4922 return const_true_rtx;
4923 break;
4924 case LT:
4925 if (val <= mmin)
4926 return const0_rtx;
4927 if (val > mmax)
4928 return const_true_rtx;
4929 break;
4931 case NE:
4932 /* x != y is always true for y out of range. */
4933 if (val < mmin || val > mmax)
4934 return const_true_rtx;
4935 break;
4937 default:
4938 break;
4942 /* Optimize integer comparisons with zero. */
4943 if (trueop1 == const0_rtx)
4945 /* Some addresses are known to be nonzero. We don't know
4946 their sign, but equality comparisons are known. */
4947 if (nonzero_address_p (trueop0))
4949 if (code == EQ || code == LEU)
4950 return const0_rtx;
4951 if (code == NE || code == GTU)
4952 return const_true_rtx;
4955 /* See if the first operand is an IOR with a constant. If so, we
4956 may be able to determine the result of this comparison. */
4957 if (GET_CODE (op0) == IOR)
4959 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4960 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4962 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4963 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4964 && (UINTVAL (inner_const)
4965 & ((unsigned HOST_WIDE_INT) 1
4966 << sign_bitnum)));
4968 switch (code)
4970 case EQ:
4971 case LEU:
4972 return const0_rtx;
4973 case NE:
4974 case GTU:
4975 return const_true_rtx;
4976 case LT:
4977 case LE:
4978 if (has_sign)
4979 return const_true_rtx;
4980 break;
4981 case GT:
4982 case GE:
4983 if (has_sign)
4984 return const0_rtx;
4985 break;
4986 default:
4987 break;
4993 /* Optimize comparison of ABS with zero. */
4994 if (trueop1 == CONST0_RTX (mode)
4995 && (GET_CODE (trueop0) == ABS
4996 || (GET_CODE (trueop0) == FLOAT_EXTEND
4997 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4999 switch (code)
5001 case LT:
5002 /* Optimize abs(x) < 0.0. */
5003 if (!HONOR_SNANS (mode)
5004 && (!INTEGRAL_MODE_P (mode)
5005 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5007 if (INTEGRAL_MODE_P (mode)
5008 && (issue_strict_overflow_warning
5009 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5010 warning (OPT_Wstrict_overflow,
5011 ("assuming signed overflow does not occur when "
5012 "assuming abs (x) < 0 is false"));
5013 return const0_rtx;
5015 break;
5017 case GE:
5018 /* Optimize abs(x) >= 0.0. */
5019 if (!HONOR_NANS (mode)
5020 && (!INTEGRAL_MODE_P (mode)
5021 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5023 if (INTEGRAL_MODE_P (mode)
5024 && (issue_strict_overflow_warning
5025 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5026 warning (OPT_Wstrict_overflow,
5027 ("assuming signed overflow does not occur when "
5028 "assuming abs (x) >= 0 is true"));
5029 return const_true_rtx;
5031 break;
5033 case UNGE:
5034 /* Optimize ! (abs(x) < 0.0). */
5035 return const_true_rtx;
5037 default:
5038 break;
5042 return 0;
5045 /* Simplify CODE, an operation with result mode MODE and three operands,
5046 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5047 a constant. Return 0 if no simplification is possible. */
5050 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5051 machine_mode op0_mode, rtx op0, rtx op1,
5052 rtx op2)
5054 unsigned int width = GET_MODE_PRECISION (mode);
5055 bool any_change = false;
5056 rtx tem, trueop2;
5058 /* VOIDmode means "infinite" precision. */
5059 if (width == 0)
5060 width = HOST_BITS_PER_WIDE_INT;
5062 switch (code)
5064 case FMA:
5065 /* Simplify negations around the multiplication. */
5066 /* -a * -b + c => a * b + c. */
5067 if (GET_CODE (op0) == NEG)
5069 tem = simplify_unary_operation (NEG, mode, op1, mode);
5070 if (tem)
5071 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5073 else if (GET_CODE (op1) == NEG)
5075 tem = simplify_unary_operation (NEG, mode, op0, mode);
5076 if (tem)
5077 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5080 /* Canonicalize the two multiplication operands. */
5081 /* a * -b + c => -b * a + c. */
5082 if (swap_commutative_operands_p (op0, op1))
5083 tem = op0, op0 = op1, op1 = tem, any_change = true;
5085 if (any_change)
5086 return gen_rtx_FMA (mode, op0, op1, op2);
5087 return NULL_RTX;
5089 case SIGN_EXTRACT:
5090 case ZERO_EXTRACT:
5091 if (CONST_INT_P (op0)
5092 && CONST_INT_P (op1)
5093 && CONST_INT_P (op2)
5094 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5095 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5097 /* Extracting a bit-field from a constant */
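/* E.g. on a target with BITS_BIG_ENDIAN == 0,
   (zero_extract (const_int 0xff) (const_int 4) (const_int 2))
   selects bits 2..5 and yields (const_int 15). */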
5098 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5099 HOST_WIDE_INT op1val = INTVAL (op1);
5100 HOST_WIDE_INT op2val = INTVAL (op2);
5101 if (BITS_BIG_ENDIAN)
5102 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5103 else
5104 val >>= op2val;
5106 if (HOST_BITS_PER_WIDE_INT != op1val)
5108 /* First zero-extend. */
5109 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5110 /* If desired, propagate sign bit. */
5111 if (code == SIGN_EXTRACT
5112 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5113 != 0)
5114 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5117 return gen_int_mode (val, mode);
5119 break;
5121 case IF_THEN_ELSE:
5122 if (CONST_INT_P (op0))
5123 return op0 != const0_rtx ? op1 : op2;
5125 /* Convert c ? a : a into "a". */
5126 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5127 return op1;
5129 /* Convert a != b ? a : b into "a". */
5130 if (GET_CODE (op0) == NE
5131 && ! side_effects_p (op0)
5132 && ! HONOR_NANS (mode)
5133 && ! HONOR_SIGNED_ZEROS (mode)
5134 && ((rtx_equal_p (XEXP (op0, 0), op1)
5135 && rtx_equal_p (XEXP (op0, 1), op2))
5136 || (rtx_equal_p (XEXP (op0, 0), op2)
5137 && rtx_equal_p (XEXP (op0, 1), op1))))
5138 return op1;
5140 /* Convert a == b ? a : b into "b". */
5141 if (GET_CODE (op0) == EQ
5142 && ! side_effects_p (op0)
5143 && ! HONOR_NANS (mode)
5144 && ! HONOR_SIGNED_ZEROS (mode)
5145 && ((rtx_equal_p (XEXP (op0, 0), op1)
5146 && rtx_equal_p (XEXP (op0, 1), op2))
5147 || (rtx_equal_p (XEXP (op0, 0), op2)
5148 && rtx_equal_p (XEXP (op0, 1), op1))))
5149 return op2;
5151 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5153 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5154 ? GET_MODE (XEXP (op0, 1))
5155 : GET_MODE (XEXP (op0, 0)));
5156 rtx temp;
5158 /* Look for happy constants in op1 and op2. */
5159 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5161 HOST_WIDE_INT t = INTVAL (op1);
5162 HOST_WIDE_INT f = INTVAL (op2);
5164 if (t == STORE_FLAG_VALUE && f == 0)
5165 code = GET_CODE (op0);
5166 else if (t == 0 && f == STORE_FLAG_VALUE)
5168 enum rtx_code tmp;
5169 tmp = reversed_comparison_code (op0, NULL_RTX);
5170 if (tmp == UNKNOWN)
5171 break;
5172 code = tmp;
5174 else
5175 break;
5177 return simplify_gen_relational (code, mode, cmp_mode,
5178 XEXP (op0, 0), XEXP (op0, 1));
5181 if (cmp_mode == VOIDmode)
5182 cmp_mode = op0_mode;
5183 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5184 cmp_mode, XEXP (op0, 0),
5185 XEXP (op0, 1));
5187 /* See if any simplifications were possible. */
5188 if (temp)
5190 if (CONST_INT_P (temp))
5191 return temp == const0_rtx ? op2 : op1;
5192 else if (temp)
5193 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5196 break;
5198 case VEC_MERGE:
5199 gcc_assert (GET_MODE (op0) == mode);
5200 gcc_assert (GET_MODE (op1) == mode);
5201 gcc_assert (VECTOR_MODE_P (mode));
5202 trueop2 = avoid_constant_pool_reference (op2);
5203 if (CONST_INT_P (trueop2))
5205 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5206 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5207 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5208 unsigned HOST_WIDE_INT mask;
5209 if (n_elts == HOST_BITS_PER_WIDE_INT)
5210 mask = -1;
5211 else
5212 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5214 if (!(sel & mask) && !side_effects_p (op0))
5215 return op1;
5216 if ((sel & mask) == mask && !side_effects_p (op1))
5217 return op0;
5219 rtx trueop0 = avoid_constant_pool_reference (op0);
5220 rtx trueop1 = avoid_constant_pool_reference (op1);
5221 if (GET_CODE (trueop0) == CONST_VECTOR
5222 && GET_CODE (trueop1) == CONST_VECTOR)
5224 rtvec v = rtvec_alloc (n_elts);
5225 unsigned int i;
5227 for (i = 0; i < n_elts; i++)
5228 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5229 ? CONST_VECTOR_ELT (trueop0, i)
5230 : CONST_VECTOR_ELT (trueop1, i));
5231 return gen_rtx_CONST_VECTOR (mode, v);
5234 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5235 if no element from a appears in the result. */
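/* E.g. with M == (const_int 12) and N == (const_int 3) in a
   four-element vector, the selector bits never overlap, so the
   outer merge keeps only elements of b and c and the result is
   (vec_merge b c (const_int 3)). */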
5236 if (GET_CODE (op0) == VEC_MERGE)
5238 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5239 if (CONST_INT_P (tem))
5241 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5242 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5243 return simplify_gen_ternary (code, mode, mode,
5244 XEXP (op0, 1), op1, op2);
5245 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5246 return simplify_gen_ternary (code, mode, mode,
5247 XEXP (op0, 0), op1, op2);
5250 if (GET_CODE (op1) == VEC_MERGE)
5252 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5253 if (CONST_INT_P (tem))
5255 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5256 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5257 return simplify_gen_ternary (code, mode, mode,
5258 op0, XEXP (op1, 1), op2);
5259 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5260 return simplify_gen_ternary (code, mode, mode,
5261 op0, XEXP (op1, 0), op2);
5265 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5266 with a. */
5267 if (GET_CODE (op0) == VEC_DUPLICATE
5268 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5269 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5270 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5272 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5273 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5275 if (XEXP (XEXP (op0, 0), 0) == op1
5276 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5277 return op1;
5282 if (rtx_equal_p (op0, op1)
5283 && !side_effects_p (op2) && !side_effects_p (op1))
5284 return op0;
5286 break;
5288 default:
5289 gcc_unreachable ();
5292 return 0;
5295 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5296 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5297 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5299 Works by unpacking OP into a collection of 8-bit values
5300 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5301 and then repacking them again for OUTERMODE. */
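/* For example, taking the QImode subreg at byte 0 of the HImode
   constant 0x1234 on a little-endian target unpacks the value into
   the byte array {0x34, 0x12}, selects byte 0 and repacks it as
   (const_int 0x34). */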
5303 static rtx
5304 simplify_immed_subreg (machine_mode outermode, rtx op,
5305 machine_mode innermode, unsigned int byte)
5307 enum {
5308 value_bit = 8,
5309 value_mask = (1 << value_bit) - 1
5311 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5312 int value_start;
5313 int i;
5314 int elem;
5316 int num_elem;
5317 rtx * elems;
5318 int elem_bitsize;
5319 rtx result_s;
5320 rtvec result_v = NULL;
5321 enum mode_class outer_class;
5322 machine_mode outer_submode;
5323 int max_bitsize;
5325 /* Some ports misuse CCmode. */
5326 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5327 return op;
5329 /* We have no way to represent a complex constant at the rtl level. */
5330 if (COMPLEX_MODE_P (outermode))
5331 return NULL_RTX;
5333 /* We support any size mode. */
5334 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5335 GET_MODE_BITSIZE (innermode));
5337 /* Unpack the value. */
5339 if (GET_CODE (op) == CONST_VECTOR)
5341 num_elem = CONST_VECTOR_NUNITS (op);
5342 elems = &CONST_VECTOR_ELT (op, 0);
5343 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5345 else
5347 num_elem = 1;
5348 elems = &op;
5349 elem_bitsize = max_bitsize;
5351 /* If this asserts, it is too complicated; reducing value_bit may help. */
5352 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5353 /* I don't know how to handle endianness of sub-units. */
5354 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5356 for (elem = 0; elem < num_elem; elem++)
5358 unsigned char * vp;
5359 rtx el = elems[elem];
5361 /* Vectors are kept in target memory order. (This is probably
5362 a mistake.) */
5364 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5365 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5366 / BITS_PER_UNIT);
5367 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5368 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5369 unsigned bytele = (subword_byte % UNITS_PER_WORD
5370 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5371 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5374 switch (GET_CODE (el))
5376 case CONST_INT:
5377 for (i = 0;
5378 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5379 i += value_bit)
5380 *vp++ = INTVAL (el) >> i;
5381 /* CONST_INTs are always logically sign-extended. */
5382 for (; i < elem_bitsize; i += value_bit)
5383 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5384 break;
5386 case CONST_WIDE_INT:
5388 rtx_mode_t val = std::make_pair (el, innermode);
5389 unsigned char extend = wi::sign_mask (val);
5391 for (i = 0; i < elem_bitsize; i += value_bit)
5392 *vp++ = wi::extract_uhwi (val, i, value_bit);
5393 for (; i < elem_bitsize; i += value_bit)
5394 *vp++ = extend;
5396 break;
5398 case CONST_DOUBLE:
5399 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5401 unsigned char extend = 0;
5402 /* If this triggers, someone should have generated a
5403 CONST_INT instead. */
5404 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5406 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5407 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5408 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5410 *vp++
5411 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5412 i += value_bit;
5415 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5416 extend = -1;
5417 for (; i < elem_bitsize; i += value_bit)
5418 *vp++ = extend;
5420 else
5422 /* This is big enough for anything on the platform. */
5423 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5424 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5426 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5427 gcc_assert (bitsize <= elem_bitsize);
5428 gcc_assert (bitsize % value_bit == 0);
5430 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5431 GET_MODE (el));
5433 /* real_to_target produces its result in words affected by
5434 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5435 and use WORDS_BIG_ENDIAN instead; see the documentation
5436 of SUBREG in rtl.texi. */
5437 for (i = 0; i < bitsize; i += value_bit)
5439 int ibase;
5440 if (WORDS_BIG_ENDIAN)
5441 ibase = bitsize - 1 - i;
5442 else
5443 ibase = i;
5444 *vp++ = tmp[ibase / 32] >> i % 32;
5447 /* It shouldn't matter what's done here, so fill it with
5448 zero. */
5449 for (; i < elem_bitsize; i += value_bit)
5450 *vp++ = 0;
5452 break;
5454 case CONST_FIXED:
5455 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5457 for (i = 0; i < elem_bitsize; i += value_bit)
5458 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5460 else
5462 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5463 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5464 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5465 i += value_bit)
5466 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5467 >> (i - HOST_BITS_PER_WIDE_INT);
5468 for (; i < elem_bitsize; i += value_bit)
5469 *vp++ = 0;
5471 break;
5473 default:
5474 gcc_unreachable ();
5478 /* Now, pick the right byte to start with. */
5479 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5480 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5481 will already have offset 0. */
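/* For example, on a fully big-endian target the SImode lowpart of a
   DImode value has SUBREG_BYTE 4; the code below maps that back to
   byte 0, the least-significant byte of the little-endian value
   array built above. */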
5482 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5484 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5485 - byte);
5486 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5487 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5488 byte = (subword_byte % UNITS_PER_WORD
5489 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5492 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5493 so if it's become negative it will instead be very large.) */
5494 gcc_assert (byte < GET_MODE_SIZE (innermode));
5496 /* Convert from bytes to chunks of size value_bit. */
5497 value_start = byte * (BITS_PER_UNIT / value_bit);
5499 /* Re-pack the value. */
5501 if (VECTOR_MODE_P (outermode))
5503 num_elem = GET_MODE_NUNITS (outermode);
5504 result_v = rtvec_alloc (num_elem);
5505 elems = &RTVEC_ELT (result_v, 0);
5506 outer_submode = GET_MODE_INNER (outermode);
5508 else
5510 num_elem = 1;
5511 elems = &result_s;
5512 outer_submode = outermode;
5515 outer_class = GET_MODE_CLASS (outer_submode);
5516 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5518 gcc_assert (elem_bitsize % value_bit == 0);
5519 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5521 for (elem = 0; elem < num_elem; elem++)
5523 unsigned char *vp;
5525 /* Vectors are stored in target memory order. (This is probably
5526 a mistake.) */
5528 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5529 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5530 / BITS_PER_UNIT);
5531 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5532 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5533 unsigned bytele = (subword_byte % UNITS_PER_WORD
5534 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5535 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5538 switch (outer_class)
5540 case MODE_INT:
5541 case MODE_PARTIAL_INT:
5543 int u;
5544 int base = 0;
5545 int units
5546 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5547 / HOST_BITS_PER_WIDE_INT;
5548 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5549 wide_int r;
5551 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5552 return NULL_RTX;
5553 for (u = 0; u < units; u++)
5555 unsigned HOST_WIDE_INT buf = 0;
5556 for (i = 0;
5557 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5558 i += value_bit)
5559 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5561 tmp[u] = buf;
5562 base += HOST_BITS_PER_WIDE_INT;
5564 r = wide_int::from_array (tmp, units,
5565 GET_MODE_PRECISION (outer_submode));
5566 #if TARGET_SUPPORTS_WIDE_INT == 0
5567 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5568 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5569 return NULL_RTX;
5570 #endif
5571 elems[elem] = immed_wide_int_const (r, outer_submode);
5573 break;
5575 case MODE_FLOAT:
5576 case MODE_DECIMAL_FLOAT:
5578 REAL_VALUE_TYPE r;
5579 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5581 /* real_from_target wants its input in words affected by
5582 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5583 and use WORDS_BIG_ENDIAN instead; see the documentation
5584 of SUBREG in rtl.texi. */
5585 for (i = 0; i < max_bitsize / 32; i++)
5586 tmp[i] = 0;
5587 for (i = 0; i < elem_bitsize; i += value_bit)
5589 int ibase;
5590 if (WORDS_BIG_ENDIAN)
5591 ibase = elem_bitsize - 1 - i;
5592 else
5593 ibase = i;
5594 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5597 real_from_target (&r, tmp, outer_submode);
5598 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5600 break;
5602 case MODE_FRACT:
5603 case MODE_UFRACT:
5604 case MODE_ACCUM:
5605 case MODE_UACCUM:
5607 FIXED_VALUE_TYPE f;
5608 f.data.low = 0;
5609 f.data.high = 0;
5610 f.mode = outer_submode;
5612 for (i = 0;
5613 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5614 i += value_bit)
5615 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5616 for (; i < elem_bitsize; i += value_bit)
5617 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5618 << (i - HOST_BITS_PER_WIDE_INT));
5620 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5622 break;
5624 default:
5625 gcc_unreachable ();
5628 if (VECTOR_MODE_P (outermode))
5629 return gen_rtx_CONST_VECTOR (outermode, result_v);
5630 else
5631 return result_s;
5634 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5635 Return 0 if no simplifications are possible. */
5637 simplify_subreg (machine_mode outermode, rtx op,
5638 machine_mode innermode, unsigned int byte)
5640 /* Little bit of sanity checking. */
5641 gcc_assert (innermode != VOIDmode);
5642 gcc_assert (outermode != VOIDmode);
5643 gcc_assert (innermode != BLKmode);
5644 gcc_assert (outermode != BLKmode);
5646 gcc_assert (GET_MODE (op) == innermode
5647 || GET_MODE (op) == VOIDmode);
5649 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5650 return NULL_RTX;
5652 if (byte >= GET_MODE_SIZE (innermode))
5653 return NULL_RTX;
5655 if (outermode == innermode && !byte)
5656 return op;
5658 if (CONST_SCALAR_INT_P (op)
5659 || CONST_DOUBLE_AS_FLOAT_P (op)
5660 || GET_CODE (op) == CONST_FIXED
5661 || GET_CODE (op) == CONST_VECTOR)
5662 return simplify_immed_subreg (outermode, op, innermode, byte);
5664 /* Changing mode twice with SUBREG => just change it once,
5665 or not at all if changing back to op's starting mode. */
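/* For example, (subreg:SI (subreg:HI (reg:SI r) 0) 0) collapses back
   to (reg:SI r), and (subreg:QI (subreg:HI (reg:SI r) 0) 0) becomes
   a single (subreg:QI (reg:SI r) 0). */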
5666 if (GET_CODE (op) == SUBREG)
5668 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5669 int final_offset = byte + SUBREG_BYTE (op);
5670 rtx newx;
5672 if (outermode == innermostmode
5673 && byte == 0 && SUBREG_BYTE (op) == 0)
5674 return SUBREG_REG (op);
5676 /* The SUBREG_BYTE represents offset, as if the value were stored
5677 in memory. An irritating exception is a paradoxical subreg, where
5678 we define SUBREG_BYTE to be 0. On big endian machines, this
5679 value should be negative. For a moment, undo this exception. */
5680 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5682 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5683 if (WORDS_BIG_ENDIAN)
5684 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5685 if (BYTES_BIG_ENDIAN)
5686 final_offset += difference % UNITS_PER_WORD;
5688 if (SUBREG_BYTE (op) == 0
5689 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5691 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5692 if (WORDS_BIG_ENDIAN)
5693 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5694 if (BYTES_BIG_ENDIAN)
5695 final_offset += difference % UNITS_PER_WORD;
5698 /* See whether resulting subreg will be paradoxical. */
5699 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5701 /* In nonparadoxical subregs we can't handle negative offsets. */
5702 if (final_offset < 0)
5703 return NULL_RTX;
5704 /* Bail out in case resulting subreg would be incorrect. */
5705 if (final_offset % GET_MODE_SIZE (outermode)
5706 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5707 return NULL_RTX;
5709 else
5711 int offset = 0;
5712 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5714 /* In a paradoxical subreg, see if we are still looking at the lower part.
5715 If so, our SUBREG_BYTE will be 0. */
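	     /* On a little-endian target the adjustments below leave
		offset at 0, so the fold only succeeds when final_offset
		is 0, i.e. when we really are looking at the lowpart.  */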
5716 if (WORDS_BIG_ENDIAN)
5717 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5718 if (BYTES_BIG_ENDIAN)
5719 offset += difference % UNITS_PER_WORD;
5720 if (offset == final_offset)
5721 final_offset = 0;
5722 else
5723 return NULL_RTX;
5726 /* Recurse for further possible simplifications. */
5727 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5728 final_offset);
5729 if (newx)
5730 return newx;
5731 if (validate_subreg (outermode, innermostmode,
5732 SUBREG_REG (op), final_offset))
5734 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5735 if (SUBREG_PROMOTED_VAR_P (op)
5736 && SUBREG_PROMOTED_SIGN (op) >= 0
5737 && GET_MODE_CLASS (outermode) == MODE_INT
5738 && IN_RANGE (GET_MODE_SIZE (outermode),
5739 GET_MODE_SIZE (innermode),
5740 GET_MODE_SIZE (innermostmode))
5741 && subreg_lowpart_p (newx))
5743 SUBREG_PROMOTED_VAR_P (newx) = 1;
5744 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5746 return newx;
5748 return NULL_RTX;
5751 /* SUBREG of a hard register => just change the register number
5752 and/or mode. If the hard register is not valid in that mode,
5753 suppress this simplification. If the hard register is the stack,
5754 frame, or argument pointer, leave this as a SUBREG. */
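	 /* For example, on a little-endian target whose hard registers are
	    4 bytes wide, with a DImode value occupying registers 0 and 1,
	    (subreg:SI (reg:DI 0) 4) can simply become (reg:SI 1), provided
	    SImode is valid for that register.  */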
5756 if (REG_P (op) && HARD_REGISTER_P (op))
5758 unsigned int regno, final_regno;
5760 regno = REGNO (op);
5761 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5762 if (HARD_REGISTER_NUM_P (final_regno))
5764 rtx x;
5765 int final_offset = byte;
5767 /* Adjust offset for paradoxical subregs. */
5768 if (byte == 0
5769 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5771 int difference = (GET_MODE_SIZE (innermode)
5772 - GET_MODE_SIZE (outermode));
5773 if (WORDS_BIG_ENDIAN)
5774 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5775 if (BYTES_BIG_ENDIAN)
5776 final_offset += difference % UNITS_PER_WORD;
5779 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5781 /* Propagate original regno. We don't have any way to specify
5782 the offset inside the original regno, so do this only for the lowpart.
5783 The information is used only by alias analysis, which cannot
5784 grok a partial register anyway. */
5786 if (subreg_lowpart_offset (outermode, innermode) == byte)
5787 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5788 return x;
5792 /* If we have a SUBREG of a register that we are replacing and we are
5793 replacing it with a MEM, make a new MEM and try replacing the
5794 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5795 or if we would be widening it. */
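	 /* For example, (subreg:QI (mem:SI (reg:SI a)) 3) can be rewritten
	    as roughly (mem:QI (plus:SI (reg:SI a) (const_int 3))), since the
	    SUBREG_BYTE of a normal subreg is already a byte offset into the
	    stored value.  */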
5797 if (MEM_P (op)
5798 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5799 /* Allow splitting of volatile memory references in case we don't
5800 have an instruction to move the whole thing. */
5801 && (! MEM_VOLATILE_P (op)
5802 || ! have_insn_for (SET, innermode))
5803 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5804 return adjust_address_nv (op, outermode, byte);
5806 /* Handle complex values represented as CONCAT
5807 of real and imaginary part. */
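	 /* For example, with 4-byte SFmode parts,
	    (subreg:SF (concat:SC (reg:SF r) (reg:SF i)) 0) folds to
	    (reg:SF r) and (subreg:SF (concat:SC (reg:SF r) (reg:SF i)) 4)
	    folds to (reg:SF i).  */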
5808 if (GET_CODE (op) == CONCAT)
5810 unsigned int part_size, final_offset;
5811 rtx part, res;
5813 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5814 if (byte < part_size)
5816 part = XEXP (op, 0);
5817 final_offset = byte;
5819 else
5821 part = XEXP (op, 1);
5822 final_offset = byte - part_size;
5825 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5826 return NULL_RTX;
5828 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5829 if (res)
5830 return res;
5831 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5832 return gen_rtx_SUBREG (outermode, part, final_offset);
5833 return NULL_RTX;
5836 /* A SUBREG resulting from a zero extension may fold to zero if
5837 it extracts higher bits than the ZERO_EXTEND's source provides. */
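	 /* E.g. on a little-endian target,
	    (subreg:SI (zero_extend:DI (reg:HI x)) 4) reads bits 32..63 of a
	    value whose bits above bit 15 are known to be zero, so it folds
	    to (const_int 0).  */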
5838 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5840 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5841 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5842 return CONST0_RTX (outermode);
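	 /* A lowpart subreg to a narrower scalar integer mode acts like a
	    truncation; e.g. on a little-endian target
	    (subreg:SI (sign_extend:DI (reg:SI x)) 0) can fold to (reg:SI x)
	    via simplify_truncation.  */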
5845 if (SCALAR_INT_MODE_P (outermode)
5846 && SCALAR_INT_MODE_P (innermode)
5847 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5848 && byte == subreg_lowpart_offset (outermode, innermode))
5850 rtx tem = simplify_truncation (outermode, op, innermode);
5851 if (tem)
5852 return tem;
5855 return NULL_RTX;
5858 /* Make a SUBREG operation or equivalent if it folds. */
5861 simplify_gen_subreg (machine_mode outermode, rtx op,
5862 machine_mode innermode, unsigned int byte)
5864 rtx newx;
5866 newx = simplify_subreg (outermode, op, innermode, byte);
5867 if (newx)
5868 return newx;
5870 if (GET_CODE (op) == SUBREG
5871 || GET_CODE (op) == CONCAT
5872 || GET_MODE (op) == VOIDmode)
5873 return NULL_RTX;
5875 if (validate_subreg (outermode, innermode, op, byte))
5876 return gen_rtx_SUBREG (outermode, op, byte);
5878 return NULL_RTX;
5881 /* Simplify X, an rtx expression.
5883 Return the simplified expression or NULL if no simplifications
5884 were possible.
5886 This is the preferred entry point into the simplification routines;
5887 however, we still allow passes to call the more specific routines.
5889 Right now GCC has three (yes, three) major bodies of RTL simplification
5890 code that need to be unified.
5892 1. fold_rtx in cse.c. This code uses various CSE specific
5893 information to aid in RTL simplification.
5895 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5896 it uses combine specific information to aid in RTL
5897 simplification.
5899 3. The routines in this file.
5902 Long term we want to only have one body of simplification code; to
5903 get to that state I recommend the following steps:
5905 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5906 which are not pass dependent state into these routines.
5908 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5909 use this routine whenever possible.
5911 3. Allow for pass dependent state to be provided to these
5912 routines and add simplifications based on the pass dependent
5913 state. Remove code from cse.c & combine.c that becomes
5914 redundant/dead.
5916 It will take time, but ultimately the compiler will be easier to
5917 maintain and improve. It's totally silly that when we add a
5918 simplification, it needs to be added in 4 places (3 for RTL
5919 simplification and 1 for tree simplification). */
5922 simplify_rtx (const_rtx x)
5924 const enum rtx_code code = GET_CODE (x);
5925 const machine_mode mode = GET_MODE (x);
5927 switch (GET_RTX_CLASS (code))
5929 case RTX_UNARY:
5930 return simplify_unary_operation (code, mode,
5931 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5932 case RTX_COMM_ARITH:
5933 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5934 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5936 /* Fall through.... */
5938 case RTX_BIN_ARITH:
5939 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5941 case RTX_TERNARY:
5942 case RTX_BITFIELD_OPS:
5943 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5944 XEXP (x, 0), XEXP (x, 1),
5945 XEXP (x, 2));
5947 case RTX_COMPARE:
5948 case RTX_COMM_COMPARE:
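	  /* Use the mode of whichever operand is not VOIDmode; a CONST_INT
	     operand carries no mode of its own.  */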
5949 return simplify_relational_operation (code, mode,
5950 ((GET_MODE (XEXP (x, 0))
5951 != VOIDmode)
5952 ? GET_MODE (XEXP (x, 0))
5953 : GET_MODE (XEXP (x, 1))),
5954 XEXP (x, 0),
5955 XEXP (x, 1));
5957 case RTX_EXTRA:
5958 if (code == SUBREG)
5959 return simplify_subreg (mode, SUBREG_REG (x),
5960 GET_MODE (SUBREG_REG (x)),
5961 SUBREG_BYTE (x));
5962 break;
5964 case RTX_OBJ:
5965 if (code == LO_SUM)
5967 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5968 if (GET_CODE (XEXP (x, 0)) == HIGH
5969 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5970 return XEXP (x, 1);
5972 break;
5974 default:
5975 break;
5977 return NULL;