1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
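/* Illustration: with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND applied to
   (HOST_WIDE_INT) -2 yields -1, while applied to (HOST_WIDE_INT) 2 it
   yields 0; exactly what the high half of a sign-extended (low, high)
   pair should contain.  */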
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating the result (because negating
63 the most negative number of the mode can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
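/* Illustration: in QImode the most negative value is -128; negating it
   as an unsigned HOST_WIDE_INT gives 128, and gen_int_mode truncates
   that back into QImode as -128, so the overflow is handled silently
   instead of invoking signed-negation undefined behaviour.  */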
70 /* Test whether expression X is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 unsigned int width;
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
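/* Illustration of the three helpers above for QImode, whose sign bit is
   0x80: val_signbit_p (QImode, 0x80) is true,
   val_signbit_known_set_p (QImode, 0xff) is true, and
   val_signbit_known_clear_p (QImode, 0x7f) is true.  */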
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
168 rtx tem;
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
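/* Sketch (hypothetical operands): if no folding applies,
   simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg) is expected to
   return (plus:SI reg (const_int 4)); the swap above keeps the constant
   in the canonical second position.  */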
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x)
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
192 switch (GET_CODE (x))
194 case MEM:
195 break;
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 REAL_VALUE_TYPE d;
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 return x;
210 default:
211 return x;
214 if (GET_MODE (x) == BLKmode)
215 return x;
217 addr = XEXP (x, 0);
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if (offset != 0 || cmode != GET_MODE (x))
247 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
248 if (tem && CONSTANT_P (tem))
249 return tem;
251 else
252 return c;
255 return x;
258 /* Simplify a MEM based on its attributes. This is the default
259 delegitimize_address target hook, and it's recommended that every
260 overrider call it. */
263 delegitimize_mem_from_attrs (rtx x)
265 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
266 use their base addresses as equivalent. */
267 if (MEM_P (x)
268 && MEM_EXPR (x)
269 && MEM_OFFSET_KNOWN_P (x))
271 tree decl = MEM_EXPR (x);
272 enum machine_mode mode = GET_MODE (x);
273 HOST_WIDE_INT offset = 0;
275 switch (TREE_CODE (decl))
277 default:
278 decl = NULL;
279 break;
281 case VAR_DECL:
282 break;
284 case ARRAY_REF:
285 case ARRAY_RANGE_REF:
286 case COMPONENT_REF:
287 case BIT_FIELD_REF:
288 case REALPART_EXPR:
289 case IMAGPART_EXPR:
290 case VIEW_CONVERT_EXPR:
292 HOST_WIDE_INT bitsize, bitpos;
293 tree toffset;
294 int unsignedp, volatilep = 0;
296 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
297 &mode, &unsignedp, &volatilep, false);
298 if (bitsize != GET_MODE_BITSIZE (mode)
299 || (bitpos % BITS_PER_UNIT)
300 || (toffset && !host_integerp (toffset, 0)))
301 decl = NULL;
302 else
304 offset += bitpos / BITS_PER_UNIT;
305 if (toffset)
306 offset += TREE_INT_CST_LOW (toffset);
308 break;
312 if (decl
313 && mode == GET_MODE (x)
314 && TREE_CODE (decl) == VAR_DECL
315 && (TREE_STATIC (decl)
316 || DECL_THREAD_LOCAL_P (decl))
317 && DECL_RTL_SET_P (decl)
318 && MEM_P (DECL_RTL (decl)))
320 rtx newx;
322 offset += MEM_OFFSET (x);
324 newx = DECL_RTL (decl);
326 if (MEM_P (newx))
328 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
330 /* Avoid creating a new MEM needlessly if we already had
331 the same address. That is the case if OFFSET is zero and the
332 old address X is identical to NEWX, or if X is of the
333 form (plus NEWX OFFSET), or the NEWX is of the form
334 (plus Y (const_int Z)) and X is that with the offset
335 added: (plus Y (const_int Z+OFFSET)). */
336 if (!((offset == 0
337 || (GET_CODE (o) == PLUS
338 && GET_CODE (XEXP (o, 1)) == CONST_INT
339 && (offset == INTVAL (XEXP (o, 1))
340 || (GET_CODE (n) == PLUS
341 && GET_CODE (XEXP (n, 1)) == CONST_INT
342 && (INTVAL (XEXP (n, 1)) + offset
343 == INTVAL (XEXP (o, 1)))
344 && (n = XEXP (n, 0))))
345 && (o = XEXP (o, 0))))
346 && rtx_equal_p (o, n)))
347 x = adjust_address_nv (newx, mode, offset);
349 else if (GET_MODE (x) == GET_MODE (newx)
350 && offset == 0)
351 x = newx;
355 return x;
358 /* Make a unary operation by first seeing if it folds and otherwise making
359 the specified operation. */
362 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
363 enum machine_mode op_mode)
365 rtx tem;
367 /* If this simplifies, use it. */
368 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
369 return tem;
371 return gen_rtx_fmt_e (code, mode, op);
374 /* Likewise for ternary operations. */
377 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
378 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
380 rtx tem;
382 /* If this simplifies, use it. */
383 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
384 op0, op1, op2)))
385 return tem;
387 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 /* Likewise, for relational operations.
391 CMP_MODE specifies mode comparison is done in. */
394 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
395 enum machine_mode cmp_mode, rtx op0, rtx op1)
397 rtx tem;
399 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
400 op0, op1)))
401 return tem;
403 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
407 and simplify the result. If FN is non-NULL, call this callback on each
408 X; if it returns non-NULL, replace X with its return value and simplify the
409 result. */
412 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
413 rtx (*fn) (rtx, const_rtx, void *), void *data)
415 enum rtx_code code = GET_CODE (x);
416 enum machine_mode mode = GET_MODE (x);
417 enum machine_mode op_mode;
418 const char *fmt;
419 rtx op0, op1, op2, newx, op;
420 rtvec vec, newvec;
421 int i, j;
423 if (__builtin_expect (fn != NULL, 0))
425 newx = fn (x, old_rtx, data);
426 if (newx)
427 return newx;
429 else if (rtx_equal_p (x, old_rtx))
430 return copy_rtx ((rtx) data);
432 switch (GET_RTX_CLASS (code))
434 case RTX_UNARY:
435 op0 = XEXP (x, 0);
436 op_mode = GET_MODE (op0);
437 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
438 if (op0 == XEXP (x, 0))
439 return x;
440 return simplify_gen_unary (code, mode, op0, op_mode);
442 case RTX_BIN_ARITH:
443 case RTX_COMM_ARITH:
444 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
445 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
446 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
447 return x;
448 return simplify_gen_binary (code, mode, op0, op1);
450 case RTX_COMPARE:
451 case RTX_COMM_COMPARE:
452 op0 = XEXP (x, 0);
453 op1 = XEXP (x, 1);
454 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
455 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_relational (code, mode, op_mode, op0, op1);
461 case RTX_TERNARY:
462 case RTX_BITFIELD_OPS:
463 op0 = XEXP (x, 0);
464 op_mode = GET_MODE (op0);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
467 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
469 return x;
470 if (op_mode == VOIDmode)
471 op_mode = GET_MODE (op0);
472 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
474 case RTX_EXTRA:
475 if (code == SUBREG)
477 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
478 if (op0 == SUBREG_REG (x))
479 return x;
480 op0 = simplify_gen_subreg (GET_MODE (x), op0,
481 GET_MODE (SUBREG_REG (x)),
482 SUBREG_BYTE (x));
483 return op0 ? op0 : x;
485 break;
487 case RTX_OBJ:
488 if (code == MEM)
490 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
491 if (op0 == XEXP (x, 0))
492 return x;
493 return replace_equiv_address_nv (x, op0);
495 else if (code == LO_SUM)
497 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
498 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
500 /* (lo_sum (high x) x) -> x */
501 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
502 return op1;
504 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
505 return x;
506 return gen_rtx_LO_SUM (mode, op0, op1);
508 break;
510 default:
511 break;
514 newx = x;
515 fmt = GET_RTX_FORMAT (code);
516 for (i = 0; fmt[i]; i++)
517 switch (fmt[i])
519 case 'E':
520 vec = XVEC (x, i);
521 newvec = XVEC (newx, i);
522 for (j = 0; j < GET_NUM_ELEM (vec); j++)
524 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
525 old_rtx, fn, data);
526 if (op != RTVEC_ELT (vec, j))
528 if (newvec == vec)
530 newvec = shallow_copy_rtvec (vec);
531 if (x == newx)
532 newx = shallow_copy_rtx (x);
533 XVEC (newx, i) = newvec;
535 RTVEC_ELT (newvec, j) = op;
538 break;
540 case 'e':
541 if (XEXP (x, i))
543 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
544 if (op != XEXP (x, i))
546 if (x == newx)
547 newx = shallow_copy_rtx (x);
548 XEXP (newx, i) = op;
551 break;
553 return newx;
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 resulting RTX. Return a new RTX which is as simplified as possible. */
560 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
562 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
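/* Usage sketch (hypothetical rtxes EXPR, FROM and TO):
   simplify_replace_rtx (EXPR, FROM, TO) returns a copy of EXPR with
   every occurrence of FROM replaced by TO, each rewritten subexpression
   being passed back through the simplify_gen_* routines above.  */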
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 Only handle cases where the truncated value is inherently an rvalue.
568 RTL provides two ways of truncating a value:
570 1. a lowpart subreg. This form is only a truncation when both
571 the outer and inner modes (here MODE and OP_MODE respectively)
572 are scalar integers, and only then when the subreg is used as
573 an rvalue.
575 It is only valid to form such truncating subregs if the
576 truncation requires no action by the target. The onus for
577 proving this is on the creator of the subreg -- e.g. the
578 caller to simplify_subreg or simplify_gen_subreg -- and typically
579 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 2. a TRUNCATE. This form handles both scalar and compound integers.
583 The first form is preferred where valid. However, the TRUNCATE
584 handling in simplify_unary_operation turns the second form into the
585 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 so it is generally safe to form rvalue truncations using:
588 simplify_gen_unary (TRUNCATE, ...)
590 and leave simplify_unary_operation to work out which representation
591 should be used.
593 Because of the proof requirements on (1), simplify_truncation must
594 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 regardless of whether the outer truncation came from a SUBREG or a
596 TRUNCATE. For example, if the caller has proven that an SImode
597 truncation of:
599 (and:DI X Y)
601 is a no-op and can be represented as a subreg, it does not follow
602 that SImode truncations of X and Y are also no-ops. On a target
603 like 64-bit MIPS that requires SImode values to be stored in
604 sign-extended form, an SImode truncation of:
606 (and:DI (reg:DI X) (const_int 63))
608 is trivially a no-op because only the lower 6 bits can be set.
609 However, X is still an arbitrary 64-bit number and so we cannot
610 assume that truncating it too is a no-op. */
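/* Sketch of the guidance above: to truncate a DImode rvalue X to SImode,
   a caller can simply write

     rtx t = simplify_gen_unary (TRUNCATE, SImode, x, DImode);

   and let simplify_unary_operation decide whether the result is
   represented as a lowpart subreg or as a TRUNCATE rtx.  */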
612 static rtx
613 simplify_truncation (enum machine_mode mode, rtx op,
614 enum machine_mode op_mode)
616 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
617 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
618 gcc_assert (precision <= op_precision);
620 /* Optimize truncations of zero and sign extended values. */
621 if (GET_CODE (op) == ZERO_EXTEND
622 || GET_CODE (op) == SIGN_EXTEND)
624 /* There are three possibilities. If MODE is the same as the
625 origmode, we can omit both the extension and the truncation.
626 If MODE is not larger than the origmode, we can apply the
627 truncation without the extension. Finally, if MODE
628 is larger than the origmode, we can just extend to the appropriate
629 mode. */
630 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
631 if (mode == origmode)
632 return XEXP (op, 0);
633 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
634 return simplify_gen_unary (TRUNCATE, mode,
635 XEXP (op, 0), origmode);
636 else
637 return simplify_gen_unary (GET_CODE (op), mode,
638 XEXP (op, 0), origmode);
641 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
642 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
643 if (GET_CODE (op) == PLUS
644 || GET_CODE (op) == MINUS
645 || GET_CODE (op) == MULT)
647 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
648 if (op0)
650 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
651 if (op1)
652 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
656 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
657 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
658 the outer subreg is effectively a truncation to the original mode. */
659 if ((GET_CODE (op) == LSHIFTRT
660 || GET_CODE (op) == ASHIFTRT)
661 /* Ensure that OP_MODE is at least twice as wide as MODE
662 to avoid the possibility that an outer LSHIFTRT shifts by more
663 than the sign extension's sign_bit_copies and introduces zeros
664 into the high bits of the result. */
665 && 2 * precision <= op_precision
666 && CONST_INT_P (XEXP (op, 1))
667 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
668 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
669 && UINTVAL (XEXP (op, 1)) < precision)
670 return simplify_gen_binary (ASHIFTRT, mode,
671 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
673 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
674 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
675 the outer subreg is effectively a truncation to the original mode. */
676 if ((GET_CODE (op) == LSHIFTRT
677 || GET_CODE (op) == ASHIFTRT)
678 && CONST_INT_P (XEXP (op, 1))
679 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
680 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
681 && UINTVAL (XEXP (op, 1)) < precision)
682 return simplify_gen_binary (LSHIFTRT, mode,
683 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
685 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
686 (ashift:QI (x:QI) C), where C is a suitable small constant and
687 the outer subreg is effectively a truncation to the original mode. */
688 if (GET_CODE (op) == ASHIFT
689 && CONST_INT_P (XEXP (op, 1))
690 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
691 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
692 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
693 && UINTVAL (XEXP (op, 1)) < precision)
694 return simplify_gen_binary (ASHIFT, mode,
695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
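/* For instance, the rule above turns
   (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) (const_int 2)))
   into (ashift:QI (x:QI) (const_int 2)).  */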
697 /* Recognize a word extraction from a multi-word subreg. */
698 if ((GET_CODE (op) == LSHIFTRT
699 || GET_CODE (op) == ASHIFTRT)
700 && SCALAR_INT_MODE_P (mode)
701 && SCALAR_INT_MODE_P (op_mode)
702 && precision >= BITS_PER_WORD
703 && 2 * precision <= op_precision
704 && CONST_INT_P (XEXP (op, 1))
705 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
706 && UINTVAL (XEXP (op, 1)) < op_precision)
708 int byte = subreg_lowpart_offset (mode, op_mode);
709 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
710 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
711 (WORDS_BIG_ENDIAN
712 ? byte - shifted_bytes
713 : byte + shifted_bytes));
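/* Example (assuming a 32-bit word, little-endian target):
   (truncate:SI (lshiftrt:DI (x:DI) (const_int 32))) is recognized as the
   high word of X and becomes (subreg:SI (x:DI) 4).  */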
716 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
717 and try replacing the TRUNCATE and shift with it. Don't do this
718 if the MEM has a mode-dependent address. */
719 if ((GET_CODE (op) == LSHIFTRT
720 || GET_CODE (op) == ASHIFTRT)
721 && SCALAR_INT_MODE_P (op_mode)
722 && MEM_P (XEXP (op, 0))
723 && CONST_INT_P (XEXP (op, 1))
724 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
725 && INTVAL (XEXP (op, 1)) > 0
726 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
727 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
728 MEM_ADDR_SPACE (XEXP (op, 0)))
729 && ! MEM_VOLATILE_P (XEXP (op, 0))
730 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
731 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
733 int byte = subreg_lowpart_offset (mode, op_mode);
734 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
735 return adjust_address_nv (XEXP (op, 0), mode,
736 (WORDS_BIG_ENDIAN
737 ? byte - shifted_bytes
738 : byte + shifted_bytes));
741 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
742 (OP:SI foo:SI) if OP is NEG or ABS. */
743 if ((GET_CODE (op) == ABS
744 || GET_CODE (op) == NEG)
745 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
746 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
747 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
748 return simplify_gen_unary (GET_CODE (op), mode,
749 XEXP (XEXP (op, 0), 0), mode);
751 /* (truncate:A (subreg:B (truncate:C X) 0)) is
752 (truncate:A X). */
753 if (GET_CODE (op) == SUBREG
754 && SCALAR_INT_MODE_P (mode)
755 && SCALAR_INT_MODE_P (op_mode)
756 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
757 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
758 && subreg_lowpart_p (op))
759 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
760 GET_MODE (XEXP (SUBREG_REG (op), 0)));
762 /* (truncate:A (truncate:B X)) is (truncate:A X). */
763 if (GET_CODE (op) == TRUNCATE)
764 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
765 GET_MODE (XEXP (op, 0)));
767 return NULL_RTX;
770 /* Try to simplify a unary operation CODE whose output mode is to be
771 MODE with input operand OP whose mode was originally OP_MODE.
772 Return zero if no simplification can be made. */
774 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
775 rtx op, enum machine_mode op_mode)
777 rtx trueop, tem;
779 trueop = avoid_constant_pool_reference (op);
781 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
782 if (tem)
783 return tem;
785 return simplify_unary_operation_1 (code, mode, op);
788 /* Perform some simplifications we can do even if the operands
789 aren't constant. */
790 static rtx
791 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
793 enum rtx_code reversed;
794 rtx temp;
796 switch (code)
798 case NOT:
799 /* (not (not X)) == X. */
800 if (GET_CODE (op) == NOT)
801 return XEXP (op, 0);
803 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
804 comparison is all ones. */
805 if (COMPARISON_P (op)
806 && (mode == BImode || STORE_FLAG_VALUE == -1)
807 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
808 return simplify_gen_relational (reversed, mode, VOIDmode,
809 XEXP (op, 0), XEXP (op, 1));
811 /* (not (plus X -1)) can become (neg X). */
812 if (GET_CODE (op) == PLUS
813 && XEXP (op, 1) == constm1_rtx)
814 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
816 /* Similarly, (not (neg X)) is (plus X -1). */
817 if (GET_CODE (op) == NEG)
818 return plus_constant (mode, XEXP (op, 0), -1);
820 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
821 if (GET_CODE (op) == XOR
822 && CONST_INT_P (XEXP (op, 1))
823 && (temp = simplify_unary_operation (NOT, mode,
824 XEXP (op, 1), mode)) != 0)
825 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
827 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
828 if (GET_CODE (op) == PLUS
829 && CONST_INT_P (XEXP (op, 1))
830 && mode_signbit_p (mode, XEXP (op, 1))
831 && (temp = simplify_unary_operation (NOT, mode,
832 XEXP (op, 1), mode)) != 0)
833 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
836 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
837 operands other than 1, but that is not valid. We could do a
838 similar simplification for (not (lshiftrt C X)) where C is
839 just the sign bit, but this doesn't seem common enough to
840 bother with. */
841 if (GET_CODE (op) == ASHIFT
842 && XEXP (op, 0) == const1_rtx)
844 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
845 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
848 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
849 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
850 so we can perform the above simplification. */
852 if (STORE_FLAG_VALUE == -1
853 && GET_CODE (op) == ASHIFTRT
854 && CONST_INT_P (XEXP (op, 1))
855 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
856 return simplify_gen_relational (GE, mode, VOIDmode,
857 XEXP (op, 0), const0_rtx);
860 if (GET_CODE (op) == SUBREG
861 && subreg_lowpart_p (op)
862 && (GET_MODE_SIZE (GET_MODE (op))
863 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
864 && GET_CODE (SUBREG_REG (op)) == ASHIFT
865 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
867 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
868 rtx x;
870 x = gen_rtx_ROTATE (inner_mode,
871 simplify_gen_unary (NOT, inner_mode, const1_rtx,
872 inner_mode),
873 XEXP (SUBREG_REG (op), 1));
874 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
875 if (temp)
876 return temp;
879 /* Apply De Morgan's laws to reduce the number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
882 coded. */
884 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
886 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
887 enum machine_mode op_mode;
889 op_mode = GET_MODE (in1);
890 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
892 op_mode = GET_MODE (in2);
893 if (op_mode == VOIDmode)
894 op_mode = mode;
895 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
897 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
899 rtx tem = in2;
900 in2 = in1; in1 = tem;
903 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
904 mode, in1, in2);
906 break;
908 case NEG:
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op) == NEG)
911 return XEXP (op, 0);
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op) == PLUS
915 && XEXP (op, 1) == const1_rtx)
916 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op) == NOT)
920 return plus_constant (mode, XEXP (op, 0), 1);
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is the same as (minus X Y). If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
930 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
932 if (GET_CODE (op) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_SCALAR_INT_P (XEXP (op, 1))
938 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
940 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
941 if (temp)
942 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
947 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
955 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
956 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
961 is a constant). */
962 if (GET_CODE (op) == ASHIFT)
964 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
965 if (temp)
966 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op) == ASHIFTRT
972 && CONST_INT_P (XEXP (op, 1))
973 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
974 return simplify_gen_binary (LSHIFTRT, mode,
975 XEXP (op, 0), XEXP (op, 1));
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op) == LSHIFTRT
980 && CONST_INT_P (XEXP (op, 1))
981 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
982 return simplify_gen_binary (ASHIFTRT, mode,
983 XEXP (op, 0), XEXP (op, 1));
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op) == XOR
987 && XEXP (op, 1) == const1_rtx
988 && nonzero_bits (XEXP (op, 0), mode) == 1)
989 return plus_constant (mode, XEXP (op, 0), -1);
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
992 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
993 if (GET_CODE (op) == LT
994 && XEXP (op, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
997 enum machine_mode inner = GET_MODE (XEXP (op, 0));
998 int isize = GET_MODE_PRECISION (inner);
999 if (STORE_FLAG_VALUE == 1)
1001 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1002 GEN_INT (isize - 1));
1003 if (mode == inner)
1004 return temp;
1005 if (GET_MODE_PRECISION (mode) > isize)
1006 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1007 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1009 else if (STORE_FLAG_VALUE == -1)
1011 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
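/* Example: with STORE_FLAG_VALUE == 1 and X in SImode,
   (neg:SI (lt:SI (x:SI) (const_int 0))) becomes
   (ashiftrt:SI (x:SI) (const_int 31)), i.e. all ones when X is
   negative and zero otherwise.  */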
1020 break;
1022 case TRUNCATE:
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op) == LSHIFTRT
1026 && GET_CODE (XEXP (op, 0)) == MULT)
1027 break;
1029 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1033 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1034 if (temp)
1035 return temp;
1037 /* We can't handle truncation to a partial integer mode here
1038 because we don't know the real bitsize of the partial
1039 integer mode. */
1040 break;
1043 if (GET_MODE (op) != VOIDmode)
1045 temp = simplify_truncation (mode, op, GET_MODE (op));
1046 if (temp)
1047 return temp;
1050 /* If we know that the value is already truncated, we can
1051 replace the TRUNCATE with a SUBREG. */
1052 if (GET_MODE_NUNITS (mode) == 1
1053 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1054 || truncated_to_mode (mode, op)))
1056 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1057 if (temp)
1058 return temp;
1061 /* A truncate of a comparison can be replaced with a subreg if
1062 STORE_FLAG_VALUE permits. This is like the previous test,
1063 but it works even if the comparison is done in a mode larger
1064 than HOST_BITS_PER_WIDE_INT. */
1065 if (HWI_COMPUTABLE_MODE_P (mode)
1066 && COMPARISON_P (op)
1067 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1069 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1070 if (temp)
1071 return temp;
1074 /* A truncate of a memory is just loading the low part of the memory
1075 if we are not changing the meaning of the address. */
1076 if (GET_CODE (op) == MEM
1077 && !VECTOR_MODE_P (mode)
1078 && !MEM_VOLATILE_P (op)
1079 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1081 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1082 if (temp)
1083 return temp;
1086 break;
1088 case FLOAT_TRUNCATE:
1089 if (DECIMAL_FLOAT_MODE_P (mode))
1090 break;
1092 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1093 if (GET_CODE (op) == FLOAT_EXTEND
1094 && GET_MODE (XEXP (op, 0)) == mode)
1095 return XEXP (op, 0);
1097 /* (float_truncate:SF (float_truncate:DF foo:XF))
1098 = (float_truncate:SF foo:XF).
1099 This may eliminate double rounding, so it is unsafe.
1101 (float_truncate:SF (float_extend:XF foo:DF))
1102 = (float_truncate:SF foo:DF).
1104 (float_truncate:DF (float_extend:XF foo:SF))
1105 = (float_extend:DF foo:SF). */
1106 if ((GET_CODE (op) == FLOAT_TRUNCATE
1107 && flag_unsafe_math_optimizations)
1108 || GET_CODE (op) == FLOAT_EXTEND)
1109 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1110 0)))
1111 > GET_MODE_SIZE (mode)
1112 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1113 mode,
1114 XEXP (op, 0), mode);
1116 /* (float_truncate (float x)) is (float x) */
1117 if (GET_CODE (op) == FLOAT
1118 && (flag_unsafe_math_optimizations
1119 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1120 && ((unsigned)significand_size (GET_MODE (op))
1121 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1122 - num_sign_bit_copies (XEXP (op, 0),
1123 GET_MODE (XEXP (op, 0))))))))
1124 return simplify_gen_unary (FLOAT, mode,
1125 XEXP (op, 0),
1126 GET_MODE (XEXP (op, 0)));
1128 /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1129 (OP:SF foo:SF) if OP is NEG or ABS. */
1130 if ((GET_CODE (op) == ABS
1131 || GET_CODE (op) == NEG)
1132 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1133 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1134 return simplify_gen_unary (GET_CODE (op), mode,
1135 XEXP (XEXP (op, 0), 0), mode);
1137 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1138 is (float_truncate:SF x). */
1139 if (GET_CODE (op) == SUBREG
1140 && subreg_lowpart_p (op)
1141 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1142 return SUBREG_REG (op);
1143 break;
1145 case FLOAT_EXTEND:
1146 if (DECIMAL_FLOAT_MODE_P (mode))
1147 break;
1149 /* (float_extend (float_extend x)) is (float_extend x)
1151 (float_extend (float x)) is (float x) assuming that double
1152 rounding can't happen. */
1154 if (GET_CODE (op) == FLOAT_EXTEND
1155 || (GET_CODE (op) == FLOAT
1156 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1157 && ((unsigned)significand_size (GET_MODE (op))
1158 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1159 - num_sign_bit_copies (XEXP (op, 0),
1160 GET_MODE (XEXP (op, 0)))))))
1161 return simplify_gen_unary (GET_CODE (op), mode,
1162 XEXP (op, 0),
1163 GET_MODE (XEXP (op, 0)));
1165 break;
1167 case ABS:
1168 /* (abs (neg <foo>)) -> (abs <foo>) */
1169 if (GET_CODE (op) == NEG)
1170 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1171 GET_MODE (XEXP (op, 0)));
1173 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1174 do nothing. */
1175 if (GET_MODE (op) == VOIDmode)
1176 break;
1178 /* If operand is something known to be positive, ignore the ABS. */
1179 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1180 || val_signbit_known_clear_p (GET_MODE (op),
1181 nonzero_bits (op, GET_MODE (op))))
1182 return op;
1184 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1185 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1186 return gen_rtx_NEG (mode, op);
1188 break;
1190 case FFS:
1191 /* (ffs (*_extend <X>)) = (ffs <X>) */
1192 if (GET_CODE (op) == SIGN_EXTEND
1193 || GET_CODE (op) == ZERO_EXTEND)
1194 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1195 GET_MODE (XEXP (op, 0)));
1196 break;
1198 case POPCOUNT:
1199 switch (GET_CODE (op))
1201 case BSWAP:
1202 case ZERO_EXTEND:
1203 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1204 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1207 case ROTATE:
1208 case ROTATERT:
1209 /* Rotations don't affect popcount. */
1210 if (!side_effects_p (XEXP (op, 1)))
1211 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213 break;
1215 default:
1216 break;
1218 break;
1220 case PARITY:
1221 switch (GET_CODE (op))
1223 case NOT:
1224 case BSWAP:
1225 case ZERO_EXTEND:
1226 case SIGN_EXTEND:
1227 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1228 GET_MODE (XEXP (op, 0)));
1230 case ROTATE:
1231 case ROTATERT:
1232 /* Rotations don't affect parity. */
1233 if (!side_effects_p (XEXP (op, 1)))
1234 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1236 break;
1238 default:
1239 break;
1241 break;
1243 case BSWAP:
1244 /* (bswap (bswap x)) -> x. */
1245 if (GET_CODE (op) == BSWAP)
1246 return XEXP (op, 0);
1247 break;
1249 case FLOAT:
1250 /* (float (sign_extend <X>)) = (float <X>). */
1251 if (GET_CODE (op) == SIGN_EXTEND)
1252 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1254 break;
1256 case SIGN_EXTEND:
1257 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1258 becomes just the MINUS if its mode is MODE. This allows
1259 folding switch statements on machines using casesi (such as
1260 the VAX). */
1261 if (GET_CODE (op) == TRUNCATE
1262 && GET_MODE (XEXP (op, 0)) == mode
1263 && GET_CODE (XEXP (op, 0)) == MINUS
1264 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1265 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1266 return XEXP (op, 0);
1268 /* Extending a widening multiplication should be canonicalized to
1269 a wider widening multiplication. */
1270 if (GET_CODE (op) == MULT)
1272 rtx lhs = XEXP (op, 0);
1273 rtx rhs = XEXP (op, 1);
1274 enum rtx_code lcode = GET_CODE (lhs);
1275 enum rtx_code rcode = GET_CODE (rhs);
1277 /* Widening multiplies usually extend both operands, but sometimes
1278 they use a shift to extract a portion of a register. */
1279 if ((lcode == SIGN_EXTEND
1280 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1281 && (rcode == SIGN_EXTEND
1282 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1284 enum machine_mode lmode = GET_MODE (lhs);
1285 enum machine_mode rmode = GET_MODE (rhs);
1286 int bits;
1288 if (lcode == ASHIFTRT)
1289 /* Number of bits not shifted off the end. */
1290 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1291 else /* lcode == SIGN_EXTEND */
1292 /* Size of inner mode. */
1293 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1295 if (rcode == ASHIFTRT)
1296 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1297 else /* rcode == SIGN_EXTEND */
1298 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1300 /* We can only widen multiplies if the result is mathematically
1301 equivalent, i.e. if overflow was impossible. */
1302 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1303 return simplify_gen_binary
1304 (MULT, mode,
1305 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1306 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
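/* Worked example:
   (sign_extend:DI (mult:SI (sign_extend:SI (a:HI)) (sign_extend:SI (b:HI))))
   gives bits = 16 + 16 = 32 <= 32, so overflow is impossible and the
   result is canonicalized to
   (mult:DI (sign_extend:DI (a:HI)) (sign_extend:DI (b:HI))).  */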
1310 /* Check for a sign extension of a subreg of a promoted
1311 variable, where the promotion is sign-extended, and the
1312 target mode is the same as the variable's promotion. */
1313 if (GET_CODE (op) == SUBREG
1314 && SUBREG_PROMOTED_VAR_P (op)
1315 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1316 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1318 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1319 if (temp)
1320 return temp;
1323 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1324 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1325 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1327 gcc_assert (GET_MODE_BITSIZE (mode)
1328 > GET_MODE_BITSIZE (GET_MODE (op)));
1329 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1330 GET_MODE (XEXP (op, 0)));
1333 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1334 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1335 GET_MODE_BITSIZE (N) - I bits.
1336 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1337 is similarly (zero_extend:M (subreg:O <X>)). */
1338 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1339 && GET_CODE (XEXP (op, 0)) == ASHIFT
1340 && CONST_INT_P (XEXP (op, 1))
1341 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1342 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1344 enum machine_mode tmode
1345 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1346 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1347 gcc_assert (GET_MODE_BITSIZE (mode)
1348 > GET_MODE_BITSIZE (GET_MODE (op)));
1349 if (tmode != BLKmode)
1351 rtx inner =
1352 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1353 if (inner)
1354 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1355 ? SIGN_EXTEND : ZERO_EXTEND,
1356 mode, inner, tmode);
1360 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1361 /* As we do not know which address space the pointer is referring to,
1362 we can do this only if the target does not support different pointer
1363 or address modes depending on the address space. */
1364 if (target_default_pointer_address_modes_p ()
1365 && ! POINTERS_EXTEND_UNSIGNED
1366 && mode == Pmode && GET_MODE (op) == ptr_mode
1367 && (CONSTANT_P (op)
1368 || (GET_CODE (op) == SUBREG
1369 && REG_P (SUBREG_REG (op))
1370 && REG_POINTER (SUBREG_REG (op))
1371 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1372 return convert_memory_address (Pmode, op);
1373 #endif
1374 break;
1376 case ZERO_EXTEND:
1377 /* Check for a zero extension of a subreg of a promoted
1378 variable, where the promotion is zero-extended, and the
1379 target mode is the same as the variable's promotion. */
1380 if (GET_CODE (op) == SUBREG
1381 && SUBREG_PROMOTED_VAR_P (op)
1382 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1383 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1385 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1386 if (temp)
1387 return temp;
1390 /* Extending a widening multiplication should be canonicalized to
1391 a wider widening multiplication. */
1392 if (GET_CODE (op) == MULT)
1394 rtx lhs = XEXP (op, 0);
1395 rtx rhs = XEXP (op, 1);
1396 enum rtx_code lcode = GET_CODE (lhs);
1397 enum rtx_code rcode = GET_CODE (rhs);
1399 /* Widening multiplies usually extend both operands, but sometimes
1400 they use a shift to extract a portion of a register. */
1401 if ((lcode == ZERO_EXTEND
1402 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1403 && (rcode == ZERO_EXTEND
1404 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1406 enum machine_mode lmode = GET_MODE (lhs);
1407 enum machine_mode rmode = GET_MODE (rhs);
1408 int bits;
1410 if (lcode == LSHIFTRT)
1411 /* Number of bits not shifted off the end. */
1412 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1413 else /* lcode == ZERO_EXTEND */
1414 /* Size of inner mode. */
1415 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1417 if (rcode == LSHIFTRT)
1418 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1419 else /* rcode == ZERO_EXTEND */
1420 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1422 /* We can only widen multiplies if the result is mathematically
1423 equivalent, i.e. if overflow was impossible. */
1424 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1425 return simplify_gen_binary
1426 (MULT, mode,
1427 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1428 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1432 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1433 if (GET_CODE (op) == ZERO_EXTEND)
1434 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1437 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1438 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1439 GET_MODE_BITSIZE (N) - I bits. */
1440 if (GET_CODE (op) == LSHIFTRT
1441 && GET_CODE (XEXP (op, 0)) == ASHIFT
1442 && CONST_INT_P (XEXP (op, 1))
1443 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1444 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1446 enum machine_mode tmode
1447 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1448 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1449 if (tmode != BLKmode)
1451 rtx inner =
1452 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1453 if (inner)
1454 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1458 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1459 /* As we do not know which address space the pointer is referring to,
1460 we can do this only if the target does not support different pointer
1461 or address modes depending on the address space. */
1462 if (target_default_pointer_address_modes_p ()
1463 && POINTERS_EXTEND_UNSIGNED > 0
1464 && mode == Pmode && GET_MODE (op) == ptr_mode
1465 && (CONSTANT_P (op)
1466 || (GET_CODE (op) == SUBREG
1467 && REG_P (SUBREG_REG (op))
1468 && REG_POINTER (SUBREG_REG (op))
1469 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1470 return convert_memory_address (Pmode, op);
1471 #endif
1472 break;
1474 default:
1475 break;
1478 return 0;
1481 /* Try to compute the value of a unary operation CODE whose output mode is to
1482 be MODE with input operand OP whose mode was originally OP_MODE.
1483 Return zero if the value cannot be computed. */
1485 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1486 rtx op, enum machine_mode op_mode)
1488 unsigned int width = GET_MODE_PRECISION (mode);
1489 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1491 if (code == VEC_DUPLICATE)
1493 gcc_assert (VECTOR_MODE_P (mode));
1494 if (GET_MODE (op) != VOIDmode)
1496 if (!VECTOR_MODE_P (GET_MODE (op)))
1497 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1498 else
1499 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1500 (GET_MODE (op)));
1502 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1503 || GET_CODE (op) == CONST_VECTOR)
1505 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1506 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1507 rtvec v = rtvec_alloc (n_elts);
1508 unsigned int i;
1510 if (GET_CODE (op) != CONST_VECTOR)
1511 for (i = 0; i < n_elts; i++)
1512 RTVEC_ELT (v, i) = op;
1513 else
1515 enum machine_mode inmode = GET_MODE (op);
1516 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1517 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1519 gcc_assert (in_n_elts < n_elts);
1520 gcc_assert ((n_elts % in_n_elts) == 0);
1521 for (i = 0; i < n_elts; i++)
1522 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1524 return gen_rtx_CONST_VECTOR (mode, v);
1528 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1530 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1531 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1532 enum machine_mode opmode = GET_MODE (op);
1533 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1534 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1535 rtvec v = rtvec_alloc (n_elts);
1536 unsigned int i;
1538 gcc_assert (op_n_elts == n_elts);
1539 for (i = 0; i < n_elts; i++)
1541 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1542 CONST_VECTOR_ELT (op, i),
1543 GET_MODE_INNER (opmode));
1544 if (!x)
1545 return 0;
1546 RTVEC_ELT (v, i) = x;
1548 return gen_rtx_CONST_VECTOR (mode, v);
1551 /* The order of these tests is critical so that, for example, we don't
1552 check the wrong mode (input vs. output) for a conversion operation,
1553 such as FIX. At some point, this should be simplified. */
1555 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1557 HOST_WIDE_INT hv, lv;
1558 REAL_VALUE_TYPE d;
1560 if (CONST_INT_P (op))
1561 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1562 else
1563 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1565 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1566 d = real_value_truncate (mode, d);
1567 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1569 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1571 HOST_WIDE_INT hv, lv;
1572 REAL_VALUE_TYPE d;
1574 if (CONST_INT_P (op))
1575 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1576 else
1577 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1579 if (op_mode == VOIDmode
1580 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1581 /* We should never get a negative number. */
1582 gcc_assert (hv >= 0);
1583 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1584 hv = 0, lv &= GET_MODE_MASK (op_mode);
1586 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1587 d = real_value_truncate (mode, d);
1588 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1591 if (CONST_INT_P (op)
1592 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1594 HOST_WIDE_INT arg0 = INTVAL (op);
1595 HOST_WIDE_INT val;
1597 switch (code)
1599 case NOT:
1600 val = ~ arg0;
1601 break;
1603 case NEG:
1604 val = - arg0;
1605 break;
1607 case ABS:
1608 val = (arg0 >= 0 ? arg0 : - arg0);
1609 break;
1611 case FFS:
1612 arg0 &= GET_MODE_MASK (mode);
1613 val = ffs_hwi (arg0);
1614 break;
1616 case CLZ:
1617 arg0 &= GET_MODE_MASK (mode);
1618 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1620 else
1621 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1622 break;
1624 case CLRSB:
1625 arg0 &= GET_MODE_MASK (mode);
1626 if (arg0 == 0)
1627 val = GET_MODE_PRECISION (mode) - 1;
1628 else if (arg0 >= 0)
1629 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1630 else if (arg0 < 0)
1631 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1632 break;
1634 case CTZ:
1635 arg0 &= GET_MODE_MASK (mode);
1636 if (arg0 == 0)
1638 /* Even if the value at zero is undefined, we have to come
1639 up with some replacement. Seems good enough. */
1640 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1641 val = GET_MODE_PRECISION (mode);
1643 else
1644 val = ctz_hwi (arg0);
1645 break;
1647 case POPCOUNT:
1648 arg0 &= GET_MODE_MASK (mode);
1649 val = 0;
1650 while (arg0)
1651 val++, arg0 &= arg0 - 1;
1652 break;
1654 case PARITY:
1655 arg0 &= GET_MODE_MASK (mode);
1656 val = 0;
1657 while (arg0)
1658 val++, arg0 &= arg0 - 1;
1659 val &= 1;
1660 break;
1662 case BSWAP:
1664 unsigned int s;
1666 val = 0;
1667 for (s = 0; s < width; s += 8)
1669 unsigned int d = width - s - 8;
1670 unsigned HOST_WIDE_INT byte;
1671 byte = (arg0 >> s) & 0xff;
1672 val |= byte << d;
1675 break;
1677 case TRUNCATE:
1678 val = arg0;
1679 break;
1681 case ZERO_EXTEND:
1682 /* When zero-extending a CONST_INT, we need to know its
1683 original mode. */
1684 gcc_assert (op_mode != VOIDmode);
1685 if (op_width == HOST_BITS_PER_WIDE_INT)
1687 /* If we were really extending the mode,
1688 we would have to distinguish between zero-extension
1689 and sign-extension. */
1690 gcc_assert (width == op_width);
1691 val = arg0;
1693 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1694 val = arg0 & GET_MODE_MASK (op_mode);
1695 else
1696 return 0;
1697 break;
1699 case SIGN_EXTEND:
1700 if (op_mode == VOIDmode)
1701 op_mode = mode;
1702 op_width = GET_MODE_PRECISION (op_mode);
1703 if (op_width == HOST_BITS_PER_WIDE_INT)
1705 /* If we were really extending the mode,
1706 we would have to distinguish between zero-extension
1707 and sign-extension. */
1708 gcc_assert (width == op_width);
1709 val = arg0;
1711 else if (op_width < HOST_BITS_PER_WIDE_INT)
1713 val = arg0 & GET_MODE_MASK (op_mode);
1714 if (val_signbit_known_set_p (op_mode, val))
1715 val |= ~GET_MODE_MASK (op_mode);
1717 else
1718 return 0;
1719 break;
1721 case SQRT:
1722 case FLOAT_EXTEND:
1723 case FLOAT_TRUNCATE:
1724 case SS_TRUNCATE:
1725 case US_TRUNCATE:
1726 case SS_NEG:
1727 case US_NEG:
1728 case SS_ABS:
1729 return 0;
1731 default:
1732 gcc_unreachable ();
1735 return gen_int_mode (val, mode);
1738 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1739 for a DImode operation on a CONST_INT. */
1740 else if (width <= HOST_BITS_PER_DOUBLE_INT
1741 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1743 double_int first, value;
1745 if (CONST_DOUBLE_AS_INT_P (op))
1746 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1747 CONST_DOUBLE_LOW (op));
1748 else
1749 first = double_int::from_shwi (INTVAL (op));
1751 switch (code)
1753 case NOT:
1754 value = ~first;
1755 break;
1757 case NEG:
1758 value = -first;
1759 break;
1761 case ABS:
1762 if (first.is_negative ())
1763 value = -first;
1764 else
1765 value = first;
1766 break;
1768 case FFS:
1769 value.high = 0;
1770 if (first.low != 0)
1771 value.low = ffs_hwi (first.low);
1772 else if (first.high != 0)
1773 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1774 else
1775 value.low = 0;
1776 break;
1778 case CLZ:
1779 value.high = 0;
1780 if (first.high != 0)
1781 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1782 - HOST_BITS_PER_WIDE_INT;
1783 else if (first.low != 0)
1784 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1785 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1786 value.low = GET_MODE_PRECISION (mode);
1787 break;
1789 case CTZ:
1790 value.high = 0;
1791 if (first.low != 0)
1792 value.low = ctz_hwi (first.low);
1793 else if (first.high != 0)
1794 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1795 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1796 value.low = GET_MODE_PRECISION (mode);
1797 break;
1799 case POPCOUNT:
1800 value = double_int_zero;
1801 while (first.low)
1803 value.low++;
1804 first.low &= first.low - 1;
1806 while (first.high)
1808 value.low++;
1809 first.high &= first.high - 1;
1811 break;
1813 case PARITY:
1814 value = double_int_zero;
1815 while (first.low)
1817 value.low++;
1818 first.low &= first.low - 1;
1820 while (first.high)
1822 value.low++;
1823 first.high &= first.high - 1;
1825 value.low &= 1;
1826 break;
1828 case BSWAP:
1830 unsigned int s;
1832 value = double_int_zero;
1833 for (s = 0; s < width; s += 8)
1835 unsigned int d = width - s - 8;
1836 unsigned HOST_WIDE_INT byte;
1838 if (s < HOST_BITS_PER_WIDE_INT)
1839 byte = (first.low >> s) & 0xff;
1840 else
1841 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1843 if (d < HOST_BITS_PER_WIDE_INT)
1844 value.low |= byte << d;
1845 else
1846 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1849 break;
1851 case TRUNCATE:
1852 /* This is just a change-of-mode, so do nothing. */
1853 value = first;
1854 break;
1856 case ZERO_EXTEND:
1857 gcc_assert (op_mode != VOIDmode);
1859 if (op_width > HOST_BITS_PER_WIDE_INT)
1860 return 0;
1862 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1863 break;
1865 case SIGN_EXTEND:
1866 if (op_mode == VOIDmode
1867 || op_width > HOST_BITS_PER_WIDE_INT)
1868 return 0;
1869 else
1871 value.low = first.low & GET_MODE_MASK (op_mode);
1872 if (val_signbit_known_set_p (op_mode, value.low))
1873 value.low |= ~GET_MODE_MASK (op_mode);
1875 value.high = HWI_SIGN_EXTEND (value.low);
1877 break;
1879 case SQRT:
1880 return 0;
1882 default:
1883 return 0;
1886 return immed_double_int_const (value, mode);
1889 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1890 && SCALAR_FLOAT_MODE_P (mode)
1891 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1893 REAL_VALUE_TYPE d, t;
1894 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1896 switch (code)
1898 case SQRT:
1899 if (HONOR_SNANS (mode) && real_isnan (&d))
1900 return 0;
1901 real_sqrt (&t, mode, &d);
1902 d = t;
1903 break;
1904 case ABS:
1905 d = real_value_abs (&d);
1906 break;
1907 case NEG:
1908 d = real_value_negate (&d);
1909 break;
1910 case FLOAT_TRUNCATE:
1911 d = real_value_truncate (mode, d);
1912 break;
1913 case FLOAT_EXTEND:
1914 /* All this does is change the mode, unless we are changing
1915 the mode class. */
1916 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1917 real_convert (&d, mode, &d);
1918 break;
1919 case FIX:
1920 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1921 break;
1922 case NOT:
1924 long tmp[4];
1925 int i;
1927 real_to_target (tmp, &d, GET_MODE (op));
1928 for (i = 0; i < 4; i++)
1929 tmp[i] = ~tmp[i];
1930 real_from_target (&d, tmp, mode);
1931 break;
1933 default:
1934 gcc_unreachable ();
1936 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1939 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1940 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1941 && GET_MODE_CLASS (mode) == MODE_INT
1942 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1944 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1945 operators are intentionally left unspecified (to ease implementation
1946 by target backends), for consistency, this routine implements the
1947 same semantics for constant folding as used by the middle-end. */
1949 /* This was formerly used only for non-IEEE float.
1950 eggert@twinsun.com says it is safe for IEEE also. */
1951 HOST_WIDE_INT xh, xl, th, tl;
1952 REAL_VALUE_TYPE x, t;
1953 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1954 switch (code)
1956 case FIX:
1957 if (REAL_VALUE_ISNAN (x))
1958 return const0_rtx;
1960 /* Test against the signed upper bound. */
1961 if (width > HOST_BITS_PER_WIDE_INT)
1963 th = ((unsigned HOST_WIDE_INT) 1
1964 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1965 tl = -1;
1967 else
1969 th = 0;
1970 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1972 real_from_integer (&t, VOIDmode, tl, th, 0);
1973 if (REAL_VALUES_LESS (t, x))
1975 xh = th;
1976 xl = tl;
1977 break;
1980 /* Test against the signed lower bound. */
1981 if (width > HOST_BITS_PER_WIDE_INT)
1983 th = (unsigned HOST_WIDE_INT) (-1)
1984 << (width - HOST_BITS_PER_WIDE_INT - 1);
1985 tl = 0;
1987 else
1989 th = -1;
1990 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1992 real_from_integer (&t, VOIDmode, tl, th, 0);
1993 if (REAL_VALUES_LESS (x, t))
1995 xh = th;
1996 xl = tl;
1997 break;
1999 REAL_VALUE_TO_INT (&xl, &xh, x);
2000 break;
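/* For example, with a 32-bit result mode, folding FIX of the value
   3.5e9 exceeds the signed upper bound 0x7fffffff, so the result
   saturates to that bound rather than wrapping; a NaN input folds
   to 0.  */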
2002 case UNSIGNED_FIX:
2003 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2004 return const0_rtx;
2006 /* Test against the unsigned upper bound. */
2007 if (width == HOST_BITS_PER_DOUBLE_INT)
2009 th = -1;
2010 tl = -1;
2012 else if (width >= HOST_BITS_PER_WIDE_INT)
2014 th = ((unsigned HOST_WIDE_INT) 1
2015 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2016 tl = -1;
2018 else
2020 th = 0;
2021 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2023 real_from_integer (&t, VOIDmode, tl, th, 1);
2024 if (REAL_VALUES_LESS (t, x))
2026 xh = th;
2027 xl = tl;
2028 break;
2031 REAL_VALUE_TO_INT (&xl, &xh, x);
2032 break;
2034 default:
2035 gcc_unreachable ();
2037 return immed_double_const (xl, xh, mode);
2040 return NULL_RTX;
2043 /* Subroutine of simplify_binary_operation to simplify a commutative,
2044 associative binary operation CODE with result mode MODE, operating
2045 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2046 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2047 canonicalization is possible. */
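/* For example, (ior (ior x (const_int 1)) (ior y (const_int 2))) is
   first linearized to (ior (ior (ior x (const_int 1)) y) (const_int 2));
   the recursive simplify_gen_binary calls then move the constants
   outward so they can be folded together, giving something like
   (ior (ior x y) (const_int 3)).  */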
2049 static rtx
2050 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2051 rtx op0, rtx op1)
2053 rtx tem;
2055 /* Linearize the operator to the left. */
2056 if (GET_CODE (op1) == code)
2058 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2059 if (GET_CODE (op0) == code)
2061 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2062 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2065 /* "a op (b op c)" becomes "(b op c) op a". */
2066 if (! swap_commutative_operands_p (op1, op0))
2067 return simplify_gen_binary (code, mode, op1, op0);
2069 tem = op0;
2070 op0 = op1;
2071 op1 = tem;
2074 if (GET_CODE (op0) == code)
2076 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2077 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2079 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2080 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2083 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2084 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2085 if (tem != 0)
2086 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2088 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2089 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2090 if (tem != 0)
2091 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2094 return 0;
2098 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2099 and OP1. Return 0 if no simplification is possible.
2101 Don't use this for relational operations such as EQ or LT.
2102 Use simplify_relational_operation instead. */
2104 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2105 rtx op0, rtx op1)
2107 rtx trueop0, trueop1;
2108 rtx tem;
2110 /* Relational operations don't work here. We must know the mode
2111 of the operands in order to do the comparison correctly.
2112 Assuming a full word can give incorrect results.
2113 Consider comparing 128 with -128 in QImode. */
2114 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2115 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2117 /* Make sure the constant is second. */
2118 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2119 && swap_commutative_operands_p (op0, op1))
2121 tem = op0, op0 = op1, op1 = tem;
2124 trueop0 = avoid_constant_pool_reference (op0);
2125 trueop1 = avoid_constant_pool_reference (op1);
2127 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2128 if (tem)
2129 return tem;
2130 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2133 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2134 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2135 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2136 actual constants. */
2138 static rtx
2139 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2140 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2142 rtx tem, reversed, opleft, opright;
2143 HOST_WIDE_INT val;
2144 unsigned int width = GET_MODE_PRECISION (mode);
2146 /* Even if we can't compute a constant result,
2147 there are some cases worth simplifying. */
2149 switch (code)
2151 case PLUS:
2152 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2153 when x is NaN, infinite, or finite and nonzero. They aren't
2154 when x is -0 and the rounding mode is not towards -infinity,
2155 since (-0) + 0 is then 0. */
2156 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2157 return op0;
2159 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2160 transformations are safe even for IEEE. */
2161 if (GET_CODE (op0) == NEG)
2162 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2163 else if (GET_CODE (op1) == NEG)
2164 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2166 /* (~a) + 1 -> -a */
2167 if (INTEGRAL_MODE_P (mode)
2168 && GET_CODE (op0) == NOT
2169 && trueop1 == const1_rtx)
2170 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2172 /* Handle both-operands-constant cases. We can only add
2173 CONST_INTs to constants since the sum of relocatable symbols
2174 can't be handled by most assemblers. Don't add CONST_INT
2175 to CONST_INT since overflow won't be computed properly if wider
2176 than HOST_BITS_PER_WIDE_INT. */
2178 if ((GET_CODE (op0) == CONST
2179 || GET_CODE (op0) == SYMBOL_REF
2180 || GET_CODE (op0) == LABEL_REF)
2181 && CONST_INT_P (op1))
2182 return plus_constant (mode, op0, INTVAL (op1));
2183 else if ((GET_CODE (op1) == CONST
2184 || GET_CODE (op1) == SYMBOL_REF
2185 || GET_CODE (op1) == LABEL_REF)
2186 && CONST_INT_P (op0))
2187 return plus_constant (mode, op1, INTVAL (op0));
2189 /* See if this is something like X * C - X or vice versa or
2190 if the multiplication is written as a shift. If so, we can
2191 distribute and make a new multiply, shift, or maybe just
2192 have X (if C is 2 in the example above). But don't make
2193 something more expensive than we had before. */
2195 if (SCALAR_INT_MODE_P (mode))
2197 double_int coeff0, coeff1;
2198 rtx lhs = op0, rhs = op1;
2200 coeff0 = double_int_one;
2201 coeff1 = double_int_one;
2203 if (GET_CODE (lhs) == NEG)
2205 coeff0 = double_int_minus_one;
2206 lhs = XEXP (lhs, 0);
2208 else if (GET_CODE (lhs) == MULT
2209 && CONST_INT_P (XEXP (lhs, 1)))
2211 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2212 lhs = XEXP (lhs, 0);
2214 else if (GET_CODE (lhs) == ASHIFT
2215 && CONST_INT_P (XEXP (lhs, 1))
2216 && INTVAL (XEXP (lhs, 1)) >= 0
2217 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2219 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2220 lhs = XEXP (lhs, 0);
2223 if (GET_CODE (rhs) == NEG)
2225 coeff1 = double_int_minus_one;
2226 rhs = XEXP (rhs, 0);
2228 else if (GET_CODE (rhs) == MULT
2229 && CONST_INT_P (XEXP (rhs, 1)))
2231 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2232 rhs = XEXP (rhs, 0);
2234 else if (GET_CODE (rhs) == ASHIFT
2235 && CONST_INT_P (XEXP (rhs, 1))
2236 && INTVAL (XEXP (rhs, 1)) >= 0
2237 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2239 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2240 rhs = XEXP (rhs, 0);
2243 if (rtx_equal_p (lhs, rhs))
2245 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2246 rtx coeff;
2247 double_int val;
2248 bool speed = optimize_function_for_speed_p (cfun);
2250 val = coeff0 + coeff1;
2251 coeff = immed_double_int_const (val, mode);
2253 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2254 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2255 ? tem : 0;
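/* For example, (plus (mult x (const_int 3)) x) has coeff0 == 3 and
   coeff1 == 1 and becomes (mult x (const_int 4)), and
   (plus (ashift x (const_int 2)) x) becomes (mult x (const_int 5)),
   but only when set_src_cost judges the new MULT to be no more
   expensive than the original PLUS.  */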
2259 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2260 if (CONST_SCALAR_INT_P (op1)
2261 && GET_CODE (op0) == XOR
2262 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2263 && mode_signbit_p (mode, op1))
2264 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2265 simplify_gen_binary (XOR, mode, op1,
2266 XEXP (op0, 1)));
2268 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2269 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2270 && GET_CODE (op0) == MULT
2271 && GET_CODE (XEXP (op0, 0)) == NEG)
2273 rtx in1, in2;
2275 in1 = XEXP (XEXP (op0, 0), 0);
2276 in2 = XEXP (op0, 1);
2277 return simplify_gen_binary (MINUS, mode, op1,
2278 simplify_gen_binary (MULT, mode,
2279 in1, in2));
2282 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2283 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2284 is 1. */
2285 if (COMPARISON_P (op0)
2286 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2287 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2288 && (reversed = reversed_comparison (op0, mode)))
2289 return
2290 simplify_gen_unary (NEG, mode, reversed, mode);
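/* For example, when STORE_FLAG_VALUE is 1 a comparison yields 0 or 1,
   so (plus (eq a b) (const_int -1)) is 0 when a == b and -1 otherwise,
   which is exactly (neg (ne a b)).  */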
2292 /* If one of the operands is a PLUS or a MINUS, see if we can
2293 simplify this by the associative law.
2294 Don't use the associative law for floating point.
2295 The inaccuracy makes it nonassociative,
2296 and subtle programs can break if operations are associated. */
2298 if (INTEGRAL_MODE_P (mode)
2299 && (plus_minus_operand_p (op0)
2300 || plus_minus_operand_p (op1))
2301 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2302 return tem;
2304 /* Reassociate floating point addition only when the user
2305 specifies associative math operations. */
2306 if (FLOAT_MODE_P (mode)
2307 && flag_associative_math)
2309 tem = simplify_associative_operation (code, mode, op0, op1);
2310 if (tem)
2311 return tem;
2313 break;
2315 case COMPARE:
2316 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2317 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2318 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2319 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2321 rtx xop00 = XEXP (op0, 0);
2322 rtx xop10 = XEXP (op1, 0);
2324 #ifdef HAVE_cc0
2325 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2326 #else
2327 if (REG_P (xop00) && REG_P (xop10)
2328 && GET_MODE (xop00) == GET_MODE (xop10)
2329 && REGNO (xop00) == REGNO (xop10)
2330 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2331 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2332 #endif
2333 return xop00;
2335 break;
2337 case MINUS:
2338 /* We can't assume x-x is 0 even with non-IEEE floating point,
2339 but since it is zero except in very strange circumstances, we
2340 will treat it as zero with -ffinite-math-only. */
2341 if (rtx_equal_p (trueop0, trueop1)
2342 && ! side_effects_p (op0)
2343 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2344 return CONST0_RTX (mode);
2346 /* Change subtraction from zero into negation. (0 - x) is the
2347 same as -x when x is NaN, infinite, or finite and nonzero.
2348 But if the mode has signed zeros, and does not round towards
2349 -infinity, then 0 - 0 is 0, not -0. */
2350 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2351 return simplify_gen_unary (NEG, mode, op1, mode);
2353 /* (-1 - a) is ~a. */
2354 if (trueop0 == constm1_rtx)
2355 return simplify_gen_unary (NOT, mode, op1, mode);
2357 /* Subtracting 0 has no effect unless the mode has signed zeros
2358 and supports rounding towards -infinity. In such a case,
2359 0 - 0 is -0. */
2360 if (!(HONOR_SIGNED_ZEROS (mode)
2361 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2362 && trueop1 == CONST0_RTX (mode))
2363 return op0;
2365 /* See if this is something like X * C - X or vice versa or
2366 if the multiplication is written as a shift. If so, we can
2367 distribute and make a new multiply, shift, or maybe just
2368 have X (if C is 2 in the example above). But don't make
2369 something more expensive than we had before. */
2371 if (SCALAR_INT_MODE_P (mode))
2373 double_int coeff0, negcoeff1;
2374 rtx lhs = op0, rhs = op1;
2376 coeff0 = double_int_one;
2377 negcoeff1 = double_int_minus_one;
2379 if (GET_CODE (lhs) == NEG)
2381 coeff0 = double_int_minus_one;
2382 lhs = XEXP (lhs, 0);
2384 else if (GET_CODE (lhs) == MULT
2385 && CONST_INT_P (XEXP (lhs, 1)))
2387 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2388 lhs = XEXP (lhs, 0);
2390 else if (GET_CODE (lhs) == ASHIFT
2391 && CONST_INT_P (XEXP (lhs, 1))
2392 && INTVAL (XEXP (lhs, 1)) >= 0
2393 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2395 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2396 lhs = XEXP (lhs, 0);
2399 if (GET_CODE (rhs) == NEG)
2401 negcoeff1 = double_int_one;
2402 rhs = XEXP (rhs, 0);
2404 else if (GET_CODE (rhs) == MULT
2405 && CONST_INT_P (XEXP (rhs, 1)))
2407 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2408 rhs = XEXP (rhs, 0);
2410 else if (GET_CODE (rhs) == ASHIFT
2411 && CONST_INT_P (XEXP (rhs, 1))
2412 && INTVAL (XEXP (rhs, 1)) >= 0
2413 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2415 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2416 negcoeff1 = -negcoeff1;
2417 rhs = XEXP (rhs, 0);
2420 if (rtx_equal_p (lhs, rhs))
2422 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2423 rtx coeff;
2424 double_int val;
2425 bool speed = optimize_function_for_speed_p (cfun);
2427 val = coeff0 + negcoeff1;
2428 coeff = immed_double_int_const (val, mode);
2430 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2431 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2432 ? tem : 0;
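/* For example, (minus (mult x (const_int 3)) x) has coeff0 == 3 and
   negcoeff1 == -1 and becomes (mult x (const_int 2)), subject to the
   same cost comparison as in the PLUS case.  */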
2436 /* (a - (-b)) -> (a + b). True even for IEEE. */
2437 if (GET_CODE (op1) == NEG)
2438 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2440 /* (-x - c) may be simplified as (-c - x). */
2441 if (GET_CODE (op0) == NEG
2442 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2444 tem = simplify_unary_operation (NEG, mode, op1, mode);
2445 if (tem)
2446 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2449 /* Don't let a relocatable value get a negative coeff. */
2450 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2451 return simplify_gen_binary (PLUS, mode,
2452 op0,
2453 neg_const_int (mode, op1));
2455 /* (x - (x & y)) -> (x & ~y) */
2456 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2458 if (rtx_equal_p (op0, XEXP (op1, 0)))
2460 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2461 GET_MODE (XEXP (op1, 1)));
2462 return simplify_gen_binary (AND, mode, op0, tem);
2464 if (rtx_equal_p (op0, XEXP (op1, 1)))
2466 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2467 GET_MODE (XEXP (op1, 0)));
2468 return simplify_gen_binary (AND, mode, op0, tem);
2472 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2473 by reversing the comparison code if valid. */
2474 if (STORE_FLAG_VALUE == 1
2475 && trueop0 == const1_rtx
2476 && COMPARISON_P (op1)
2477 && (reversed = reversed_comparison (op1, mode)))
2478 return reversed;
2480 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2481 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2482 && GET_CODE (op1) == MULT
2483 && GET_CODE (XEXP (op1, 0)) == NEG)
2485 rtx in1, in2;
2487 in1 = XEXP (XEXP (op1, 0), 0);
2488 in2 = XEXP (op1, 1);
2489 return simplify_gen_binary (PLUS, mode,
2490 simplify_gen_binary (MULT, mode,
2491 in1, in2),
2492 op0);
2495 /* Canonicalize (minus (neg A) (mult B C)) to
2496 (minus (mult (neg B) C) A). */
2497 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2498 && GET_CODE (op1) == MULT
2499 && GET_CODE (op0) == NEG)
2501 rtx in1, in2;
2503 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2504 in2 = XEXP (op1, 1);
2505 return simplify_gen_binary (MINUS, mode,
2506 simplify_gen_binary (MULT, mode,
2507 in1, in2),
2508 XEXP (op0, 0));
2511 /* If one of the operands is a PLUS or a MINUS, see if we can
2512 simplify this by the associative law. This will, for example,
2513 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2514 Don't use the associative law for floating point.
2515 The inaccuracy makes it nonassociative,
2516 and subtle programs can break if operations are associated. */
2518 if (INTEGRAL_MODE_P (mode)
2519 && (plus_minus_operand_p (op0)
2520 || plus_minus_operand_p (op1))
2521 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2522 return tem;
2523 break;
2525 case MULT:
2526 if (trueop1 == constm1_rtx)
2527 return simplify_gen_unary (NEG, mode, op0, mode);
2529 if (GET_CODE (op0) == NEG)
2531 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2532 /* If op1 is a MULT as well and simplify_unary_operation
2533 just moved the NEG to the second operand, simplify_gen_binary
2534 below could, through simplify_associative_operation, move
2535 the NEG around again and recurse endlessly. */
2536 if (temp
2537 && GET_CODE (op1) == MULT
2538 && GET_CODE (temp) == MULT
2539 && XEXP (op1, 0) == XEXP (temp, 0)
2540 && GET_CODE (XEXP (temp, 1)) == NEG
2541 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2542 temp = NULL_RTX;
2543 if (temp)
2544 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2546 if (GET_CODE (op1) == NEG)
2548 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2549 /* If op0 is a MULT as well and simplify_unary_operation
2550 just moved the NEG to the second operand, simplify_gen_binary
2551 below could, through simplify_associative_operation, move
2552 the NEG around again and recurse endlessly. */
2553 if (temp
2554 && GET_CODE (op0) == MULT
2555 && GET_CODE (temp) == MULT
2556 && XEXP (op0, 0) == XEXP (temp, 0)
2557 && GET_CODE (XEXP (temp, 1)) == NEG
2558 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2559 temp = NULL_RTX;
2560 if (temp)
2561 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2564 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2565 x is NaN, since x * 0 is then also NaN. Nor is it valid
2566 when the mode has signed zeros, since multiplying a negative
2567 number by 0 will give -0, not 0. */
2568 if (!HONOR_NANS (mode)
2569 && !HONOR_SIGNED_ZEROS (mode)
2570 && trueop1 == CONST0_RTX (mode)
2571 && ! side_effects_p (op0))
2572 return op1;
2574 /* In IEEE floating point, x*1 is not equivalent to x for
2575 signalling NaNs. */
2576 if (!HONOR_SNANS (mode)
2577 && trueop1 == CONST1_RTX (mode))
2578 return op0;
2580 /* Convert multiply by constant power of two into shift unless
2581 we are still generating RTL. This test is a kludge. */
2582 if (CONST_INT_P (trueop1)
2583 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2584 /* If the mode is larger than the host word size, and the
2585 uppermost bit is set, then this isn't a power of two due
2586 to implicit sign extension. */
2587 && (width <= HOST_BITS_PER_WIDE_INT
2588 || val != HOST_BITS_PER_WIDE_INT - 1))
2589 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2591 /* Likewise for multipliers wider than a word. */
2592 if (CONST_DOUBLE_AS_INT_P (trueop1)
2593 && GET_MODE (op0) == mode
2594 && CONST_DOUBLE_LOW (trueop1) == 0
2595 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2596 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2597 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2598 return simplify_gen_binary (ASHIFT, mode, op0,
2599 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2601 /* x*2 is x+x and x*(-1) is -x */
2602 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2603 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2604 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2605 && GET_MODE (op0) == mode)
2607 REAL_VALUE_TYPE d;
2608 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2610 if (REAL_VALUES_EQUAL (d, dconst2))
2611 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2613 if (!HONOR_SNANS (mode)
2614 && REAL_VALUES_EQUAL (d, dconstm1))
2615 return simplify_gen_unary (NEG, mode, op0, mode);
2618 /* Optimize -x * -x as x * x. */
2619 if (FLOAT_MODE_P (mode)
2620 && GET_CODE (op0) == NEG
2621 && GET_CODE (op1) == NEG
2622 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2623 && !side_effects_p (XEXP (op0, 0)))
2624 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2626 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2627 if (SCALAR_FLOAT_MODE_P (mode)
2628 && GET_CODE (op0) == ABS
2629 && GET_CODE (op1) == ABS
2630 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2631 && !side_effects_p (XEXP (op0, 0)))
2632 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2634 /* Reassociate multiplication, but for floating point MULTs
2635 only when the user specifies unsafe math optimizations. */
2636 if (! FLOAT_MODE_P (mode)
2637 || flag_unsafe_math_optimizations)
2639 tem = simplify_associative_operation (code, mode, op0, op1);
2640 if (tem)
2641 return tem;
2643 break;
2645 case IOR:
2646 if (trueop1 == CONST0_RTX (mode))
2647 return op0;
2648 if (INTEGRAL_MODE_P (mode)
2649 && trueop1 == CONSTM1_RTX (mode)
2650 && !side_effects_p (op0))
2651 return op1;
2652 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2653 return op0;
2654 /* A | (~A) -> -1 */
2655 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2656 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2657 && ! side_effects_p (op0)
2658 && SCALAR_INT_MODE_P (mode))
2659 return constm1_rtx;
2661 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2662 if (CONST_INT_P (op1)
2663 && HWI_COMPUTABLE_MODE_P (mode)
2664 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2665 && !side_effects_p (op0))
2666 return op1;
2668 /* Canonicalize (X & C1) | C2. */
2669 if (GET_CODE (op0) == AND
2670 && CONST_INT_P (trueop1)
2671 && CONST_INT_P (XEXP (op0, 1)))
2673 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2674 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2675 HOST_WIDE_INT c2 = INTVAL (trueop1);
2677 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2678 if ((c1 & c2) == c1
2679 && !side_effects_p (XEXP (op0, 0)))
2680 return trueop1;
2682 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2683 if (((c1|c2) & mask) == mask)
2684 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2686 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2687 if (((c1 & ~c2) & mask) != (c1 & mask))
2689 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2690 gen_int_mode (c1 & ~c2, mode));
2691 return simplify_gen_binary (IOR, mode, tem, op1);
2695 /* Convert (A & B) | A to A. */
2696 if (GET_CODE (op0) == AND
2697 && (rtx_equal_p (XEXP (op0, 0), op1)
2698 || rtx_equal_p (XEXP (op0, 1), op1))
2699 && ! side_effects_p (XEXP (op0, 0))
2700 && ! side_effects_p (XEXP (op0, 1)))
2701 return op1;
2703 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2704 mode size to (rotate A CX). */
2706 if (GET_CODE (op1) == ASHIFT
2707 || GET_CODE (op1) == SUBREG)
2709 opleft = op1;
2710 opright = op0;
2712 else
2714 opright = op1;
2715 opleft = op0;
2718 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2719 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2720 && CONST_INT_P (XEXP (opleft, 1))
2721 && CONST_INT_P (XEXP (opright, 1))
2722 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2723 == GET_MODE_PRECISION (mode)))
2724 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2726 /* Same, but for ashift that has been "simplified" to a wider mode
2727 by simplify_shift_const. */
2729 if (GET_CODE (opleft) == SUBREG
2730 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2731 && GET_CODE (opright) == LSHIFTRT
2732 && GET_CODE (XEXP (opright, 0)) == SUBREG
2733 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2734 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2735 && (GET_MODE_SIZE (GET_MODE (opleft))
2736 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2737 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2738 SUBREG_REG (XEXP (opright, 0)))
2739 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2740 && CONST_INT_P (XEXP (opright, 1))
2741 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2742 == GET_MODE_PRECISION (mode)))
2743 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2744 XEXP (SUBREG_REG (opleft), 1));
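/* For example, in SImode (ior (ashift x (const_int 24))
   (lshiftrt x (const_int 8))) has shift counts summing to the mode
   precision, so it is rewritten as (rotate x (const_int 24)).  */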
2746 /* If we have (ior (and X C1) C2), simplify this by making
2747 C1 as small as possible if C1 actually changes. */
2748 if (CONST_INT_P (op1)
2749 && (HWI_COMPUTABLE_MODE_P (mode)
2750 || INTVAL (op1) > 0)
2751 && GET_CODE (op0) == AND
2752 && CONST_INT_P (XEXP (op0, 1))
2753 && CONST_INT_P (op1)
2754 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2755 return simplify_gen_binary (IOR, mode,
2756 simplify_gen_binary
2757 (AND, mode, XEXP (op0, 0),
2758 GEN_INT (UINTVAL (XEXP (op0, 1))
2759 & ~UINTVAL (op1))),
2760 op1);
2762 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2763 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the PLUS
2764 does not affect any of the bits in OP1, then we can do the IOR
2765 as a PLUS and we can associate. This is valid if OP1 can be
2766 safely shifted left C bits. */
2767 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2768 && GET_CODE (XEXP (op0, 0)) == PLUS
2769 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2770 && CONST_INT_P (XEXP (op0, 1))
2771 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2773 int count = INTVAL (XEXP (op0, 1));
2774 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2776 if (mask >> count == INTVAL (trueop1)
2777 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2778 return simplify_gen_binary (ASHIFTRT, mode,
2779 plus_constant (mode, XEXP (op0, 0),
2780 mask),
2781 XEXP (op0, 1));
2784 tem = simplify_associative_operation (code, mode, op0, op1);
2785 if (tem)
2786 return tem;
2787 break;
2789 case XOR:
2790 if (trueop1 == CONST0_RTX (mode))
2791 return op0;
2792 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2793 return simplify_gen_unary (NOT, mode, op0, mode);
2794 if (rtx_equal_p (trueop0, trueop1)
2795 && ! side_effects_p (op0)
2796 && GET_MODE_CLASS (mode) != MODE_CC)
2797 return CONST0_RTX (mode);
2799 /* Canonicalize XOR of the most significant bit to PLUS. */
2800 if (CONST_SCALAR_INT_P (op1)
2801 && mode_signbit_p (mode, op1))
2802 return simplify_gen_binary (PLUS, mode, op0, op1);
2803 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2804 if (CONST_SCALAR_INT_P (op1)
2805 && GET_CODE (op0) == PLUS
2806 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2807 && mode_signbit_p (mode, XEXP (op0, 1)))
2808 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2809 simplify_gen_binary (XOR, mode, op1,
2810 XEXP (op0, 1)));
2812 /* If we are XORing two things that have no bits in common,
2813 convert them into an IOR. This helps to detect rotations encoded
2814 using shift pairs, and possibly enables other simplifications. */
2816 if (HWI_COMPUTABLE_MODE_P (mode)
2817 && (nonzero_bits (op0, mode)
2818 & nonzero_bits (op1, mode)) == 0)
2819 return (simplify_gen_binary (IOR, mode, op0, op1));
2821 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2822 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2823 (NOT y). */
2825 int num_negated = 0;
2827 if (GET_CODE (op0) == NOT)
2828 num_negated++, op0 = XEXP (op0, 0);
2829 if (GET_CODE (op1) == NOT)
2830 num_negated++, op1 = XEXP (op1, 0);
2832 if (num_negated == 2)
2833 return simplify_gen_binary (XOR, mode, op0, op1);
2834 else if (num_negated == 1)
2835 return simplify_gen_unary (NOT, mode,
2836 simplify_gen_binary (XOR, mode, op0, op1),
2837 mode);
2840 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2841 correspond to a machine insn or result in further simplifications
2842 if B is a constant. */
2844 if (GET_CODE (op0) == AND
2845 && rtx_equal_p (XEXP (op0, 1), op1)
2846 && ! side_effects_p (op1))
2847 return simplify_gen_binary (AND, mode,
2848 simplify_gen_unary (NOT, mode,
2849 XEXP (op0, 0), mode),
2850 op1);
2852 else if (GET_CODE (op0) == AND
2853 && rtx_equal_p (XEXP (op0, 0), op1)
2854 && ! side_effects_p (op1))
2855 return simplify_gen_binary (AND, mode,
2856 simplify_gen_unary (NOT, mode,
2857 XEXP (op0, 1), mode),
2858 op1);
2860 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2861 we can transform like this:
2862 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2863 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2864 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2865 Attempt a few simplifications when B and C are both constants. */
2866 if (GET_CODE (op0) == AND
2867 && CONST_INT_P (op1)
2868 && CONST_INT_P (XEXP (op0, 1)))
2870 rtx a = XEXP (op0, 0);
2871 rtx b = XEXP (op0, 1);
2872 rtx c = op1;
2873 HOST_WIDE_INT bval = INTVAL (b);
2874 HOST_WIDE_INT cval = INTVAL (c);
2876 rtx na_c
2877 = simplify_binary_operation (AND, mode,
2878 simplify_gen_unary (NOT, mode, a, mode),
2880 if ((~cval & bval) == 0)
2882 /* Try to simplify ~A&C | ~B&C. */
2883 if (na_c != NULL_RTX)
2884 return simplify_gen_binary (IOR, mode, na_c,
2885 GEN_INT (~bval & cval));
2887 else
2889 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2890 if (na_c == const0_rtx)
2892 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2893 GEN_INT (~cval & bval));
2894 return simplify_gen_binary (IOR, mode, a_nc_b,
2895 GEN_INT (~bval & cval));
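/* For example, with B == 0x0f and C == 0xff we have ~C & B == 0, so
   the first branch applies and (xor (and A (const_int 0x0f))
   (const_int 0xff)) becomes (ior N (const_int 0xf0)), where N is the
   simplified form of (and (not A) (const_int 0xff)); the rewrite is
   only attempted when that AND actually simplifies.  */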
2900 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2901 comparison if STORE_FLAG_VALUE is 1. */
2902 if (STORE_FLAG_VALUE == 1
2903 && trueop1 == const1_rtx
2904 && COMPARISON_P (op0)
2905 && (reversed = reversed_comparison (op0, mode)))
2906 return reversed;
2908 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2909 is (lt foo (const_int 0)), so we can perform the above
2910 simplification if STORE_FLAG_VALUE is 1. */
2912 if (STORE_FLAG_VALUE == 1
2913 && trueop1 == const1_rtx
2914 && GET_CODE (op0) == LSHIFTRT
2915 && CONST_INT_P (XEXP (op0, 1))
2916 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2917 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2919 /* (xor (comparison foo bar) (const_int sign-bit)) can likewise become
2920 the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2921 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2922 && trueop1 == const_true_rtx
2923 && COMPARISON_P (op0)
2924 && (reversed = reversed_comparison (op0, mode)))
2925 return reversed;
2927 tem = simplify_associative_operation (code, mode, op0, op1);
2928 if (tem)
2929 return tem;
2930 break;
2932 case AND:
2933 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2934 return trueop1;
2935 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2936 return op0;
2937 if (HWI_COMPUTABLE_MODE_P (mode))
2939 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2940 HOST_WIDE_INT nzop1;
2941 if (CONST_INT_P (trueop1))
2943 HOST_WIDE_INT val1 = INTVAL (trueop1);
2944 /* If we are turning off bits already known off in OP0, we need
2945 not do an AND. */
2946 if ((nzop0 & ~val1) == 0)
2947 return op0;
2949 nzop1 = nonzero_bits (trueop1, mode);
2950 /* If we are clearing all the nonzero bits, the result is zero. */
2951 if ((nzop1 & nzop0) == 0
2952 && !side_effects_p (op0) && !side_effects_p (op1))
2953 return CONST0_RTX (mode);
2955 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2956 && GET_MODE_CLASS (mode) != MODE_CC)
2957 return op0;
2958 /* A & (~A) -> 0 */
2959 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2960 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2961 && ! side_effects_p (op0)
2962 && GET_MODE_CLASS (mode) != MODE_CC)
2963 return CONST0_RTX (mode);
2965 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2966 there are no nonzero bits of C outside of X's mode. */
2967 if ((GET_CODE (op0) == SIGN_EXTEND
2968 || GET_CODE (op0) == ZERO_EXTEND)
2969 && CONST_INT_P (trueop1)
2970 && HWI_COMPUTABLE_MODE_P (mode)
2971 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2972 & UINTVAL (trueop1)) == 0)
2974 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2975 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2976 gen_int_mode (INTVAL (trueop1),
2977 imode));
2978 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
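/* For example, (and (sign_extend:SI (reg:QI x)) (const_int 0x7f))
   has no constant bits outside QImode, so it becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))).  */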
2981 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2982 we might be able to further simplify the AND with X and potentially
2983 remove the truncation altogether. */
2984 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2986 rtx x = XEXP (op0, 0);
2987 enum machine_mode xmode = GET_MODE (x);
2988 tem = simplify_gen_binary (AND, xmode, x,
2989 gen_int_mode (INTVAL (trueop1), xmode));
2990 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2993 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2994 if (GET_CODE (op0) == IOR
2995 && CONST_INT_P (trueop1)
2996 && CONST_INT_P (XEXP (op0, 1)))
2998 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2999 return simplify_gen_binary (IOR, mode,
3000 simplify_gen_binary (AND, mode,
3001 XEXP (op0, 0), op1),
3002 gen_int_mode (tmp, mode));
3005 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3006 insn (and may simplify more). */
3007 if (GET_CODE (op0) == XOR
3008 && rtx_equal_p (XEXP (op0, 0), op1)
3009 && ! side_effects_p (op1))
3010 return simplify_gen_binary (AND, mode,
3011 simplify_gen_unary (NOT, mode,
3012 XEXP (op0, 1), mode),
3013 op1);
3015 if (GET_CODE (op0) == XOR
3016 && rtx_equal_p (XEXP (op0, 1), op1)
3017 && ! side_effects_p (op1))
3018 return simplify_gen_binary (AND, mode,
3019 simplify_gen_unary (NOT, mode,
3020 XEXP (op0, 0), mode),
3021 op1);
3023 /* Similarly for (~(A ^ B)) & A. */
3024 if (GET_CODE (op0) == NOT
3025 && GET_CODE (XEXP (op0, 0)) == XOR
3026 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3027 && ! side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3030 if (GET_CODE (op0) == NOT
3031 && GET_CODE (XEXP (op0, 0)) == XOR
3032 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3033 && ! side_effects_p (op1))
3034 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3036 /* Convert (A | B) & A to A. */
3037 if (GET_CODE (op0) == IOR
3038 && (rtx_equal_p (XEXP (op0, 0), op1)
3039 || rtx_equal_p (XEXP (op0, 1), op1))
3040 && ! side_effects_p (XEXP (op0, 0))
3041 && ! side_effects_p (XEXP (op0, 1)))
3042 return op1;
3044 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3045 ((A & N) + B) & M -> (A + B) & M
3046 Similarly if (N & M) == 0,
3047 ((A | N) + B) & M -> (A + B) & M
3048 and for - instead of + and/or ^ instead of |.
3049 Also, if (N & M) == 0, then
3050 (A +- N) & M -> A & M. */
3051 if (CONST_INT_P (trueop1)
3052 && HWI_COMPUTABLE_MODE_P (mode)
3053 && ~UINTVAL (trueop1)
3054 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3055 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3057 rtx pmop[2];
3058 int which;
3060 pmop[0] = XEXP (op0, 0);
3061 pmop[1] = XEXP (op0, 1);
3063 if (CONST_INT_P (pmop[1])
3064 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3065 return simplify_gen_binary (AND, mode, pmop[0], op1);
3067 for (which = 0; which < 2; which++)
3069 tem = pmop[which];
3070 switch (GET_CODE (tem))
3072 case AND:
3073 if (CONST_INT_P (XEXP (tem, 1))
3074 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3075 == UINTVAL (trueop1))
3076 pmop[which] = XEXP (tem, 0);
3077 break;
3078 case IOR:
3079 case XOR:
3080 if (CONST_INT_P (XEXP (tem, 1))
3081 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3082 pmop[which] = XEXP (tem, 0);
3083 break;
3084 default:
3085 break;
3089 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3091 tem = simplify_gen_binary (GET_CODE (op0), mode,
3092 pmop[0], pmop[1]);
3093 return simplify_gen_binary (code, mode, tem, op1);
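/* For example, with M == 0xff,
   (and (plus (and a (const_int 0xffff)) b) (const_int 0xff)) drops
   the inner AND and becomes (and (plus a b) (const_int 0xff)):
   clearing bits of A above the mask cannot change the masked low
   bits of the sum, since carries only propagate upward.  */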
3097 /* (and X (ior (not X) Y)) -> (and X Y) */
3098 if (GET_CODE (op1) == IOR
3099 && GET_CODE (XEXP (op1, 0)) == NOT
3100 && op0 == XEXP (XEXP (op1, 0), 0))
3101 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3103 /* (and (ior (not X) Y) X) -> (and X Y) */
3104 if (GET_CODE (op0) == IOR
3105 && GET_CODE (XEXP (op0, 0)) == NOT
3106 && op1 == XEXP (XEXP (op0, 0), 0))
3107 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3109 tem = simplify_associative_operation (code, mode, op0, op1);
3110 if (tem)
3111 return tem;
3112 break;
3114 case UDIV:
3115 /* 0/x is 0 (or x&0 if x has side-effects). */
3116 if (trueop0 == CONST0_RTX (mode))
3118 if (side_effects_p (op1))
3119 return simplify_gen_binary (AND, mode, op1, trueop0);
3120 return trueop0;
3122 /* x/1 is x. */
3123 if (trueop1 == CONST1_RTX (mode))
3125 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3126 if (tem)
3127 return tem;
3129 /* Convert divide by power of two into shift. */
3130 if (CONST_INT_P (trueop1)
3131 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3132 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3133 break;
3135 case DIV:
3136 /* Handle floating point and integers separately. */
3137 if (SCALAR_FLOAT_MODE_P (mode))
3139 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3140 safe for modes with NaNs, since 0.0 / 0.0 will then be
3141 NaN rather than 0.0. Nor is it safe for modes with signed
3142 zeros, since dividing 0 by a negative number gives -0.0. */
3143 if (trueop0 == CONST0_RTX (mode)
3144 && !HONOR_NANS (mode)
3145 && !HONOR_SIGNED_ZEROS (mode)
3146 && ! side_effects_p (op1))
3147 return op0;
3148 /* x/1.0 is x. */
3149 if (trueop1 == CONST1_RTX (mode)
3150 && !HONOR_SNANS (mode))
3151 return op0;
3153 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3154 && trueop1 != CONST0_RTX (mode))
3156 REAL_VALUE_TYPE d;
3157 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3159 /* x/-1.0 is -x. */
3160 if (REAL_VALUES_EQUAL (d, dconstm1)
3161 && !HONOR_SNANS (mode))
3162 return simplify_gen_unary (NEG, mode, op0, mode);
3164 /* Change FP division by a constant into multiplication.
3165 Only do this with -freciprocal-math. */
3166 if (flag_reciprocal_math
3167 && !REAL_VALUES_EQUAL (d, dconst0))
3169 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3170 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3171 return simplify_gen_binary (MULT, mode, op0, tem);
3175 else if (SCALAR_INT_MODE_P (mode))
3177 /* 0/x is 0 (or x&0 if x has side-effects). */
3178 if (trueop0 == CONST0_RTX (mode)
3179 && !cfun->can_throw_non_call_exceptions)
3181 if (side_effects_p (op1))
3182 return simplify_gen_binary (AND, mode, op1, trueop0);
3183 return trueop0;
3185 /* x/1 is x. */
3186 if (trueop1 == CONST1_RTX (mode))
3188 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3189 if (tem)
3190 return tem;
3192 /* x/-1 is -x. */
3193 if (trueop1 == constm1_rtx)
3195 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3196 if (x)
3197 return simplify_gen_unary (NEG, mode, x, mode);
3200 break;
3202 case UMOD:
3203 /* 0%x is 0 (or x&0 if x has side-effects). */
3204 if (trueop0 == CONST0_RTX (mode))
3206 if (side_effects_p (op1))
3207 return simplify_gen_binary (AND, mode, op1, trueop0);
3208 return trueop0;
3210 /* x%1 is 0 (or x&0 if x has side-effects). */
3211 if (trueop1 == CONST1_RTX (mode))
3213 if (side_effects_p (op0))
3214 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3215 return CONST0_RTX (mode);
3217 /* Implement modulus by power of two as AND. */
3218 if (CONST_INT_P (trueop1)
3219 && exact_log2 (UINTVAL (trueop1)) > 0)
3220 return simplify_gen_binary (AND, mode, op0,
3221 GEN_INT (INTVAL (op1) - 1));
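/* For example, (umod x (const_int 8)) becomes
   (and x (const_int 7)).  */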
3222 break;
3224 case MOD:
3225 /* 0%x is 0 (or x&0 if x has side-effects). */
3226 if (trueop0 == CONST0_RTX (mode))
3228 if (side_effects_p (op1))
3229 return simplify_gen_binary (AND, mode, op1, trueop0);
3230 return trueop0;
3232 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3233 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3235 if (side_effects_p (op0))
3236 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3237 return CONST0_RTX (mode);
3239 break;
3241 case ROTATERT:
3242 case ROTATE:
3243 case ASHIFTRT:
3244 if (trueop1 == CONST0_RTX (mode))
3245 return op0;
3246 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3247 return op0;
3248 /* Rotating ~0 always results in ~0. */
3249 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3250 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3251 && ! side_effects_p (op1))
3252 return op0;
3253 canonicalize_shift:
3254 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3256 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3257 if (val != INTVAL (op1))
3258 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3260 break;
3262 case ASHIFT:
3263 case SS_ASHIFT:
3264 case US_ASHIFT:
3265 if (trueop1 == CONST0_RTX (mode))
3266 return op0;
3267 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3268 return op0;
3269 goto canonicalize_shift;
3271 case LSHIFTRT:
3272 if (trueop1 == CONST0_RTX (mode))
3273 return op0;
3274 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3275 return op0;
3276 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
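/* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for a 32-bit
   operand mode, (lshiftrt (clz x) (const_int 5)) is 1 exactly when
   x == 0, so it can be expressed as (eq x (const_int 0)) when
   STORE_FLAG_VALUE is 1.  */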
3277 if (GET_CODE (op0) == CLZ
3278 && CONST_INT_P (trueop1)
3279 && STORE_FLAG_VALUE == 1
3280 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3282 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3283 unsigned HOST_WIDE_INT zero_val = 0;
3285 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3286 && zero_val == GET_MODE_PRECISION (imode)
3287 && INTVAL (trueop1) == exact_log2 (zero_val))
3288 return simplify_gen_relational (EQ, mode, imode,
3289 XEXP (op0, 0), const0_rtx);
3291 goto canonicalize_shift;
3293 case SMIN:
3294 if (width <= HOST_BITS_PER_WIDE_INT
3295 && mode_signbit_p (mode, trueop1)
3296 && ! side_effects_p (op0))
3297 return op1;
3298 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3299 return op0;
3300 tem = simplify_associative_operation (code, mode, op0, op1);
3301 if (tem)
3302 return tem;
3303 break;
3305 case SMAX:
3306 if (width <= HOST_BITS_PER_WIDE_INT
3307 && CONST_INT_P (trueop1)
3308 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3309 && ! side_effects_p (op0))
3310 return op1;
3311 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3312 return op0;
3313 tem = simplify_associative_operation (code, mode, op0, op1);
3314 if (tem)
3315 return tem;
3316 break;
3318 case UMIN:
3319 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3320 return op1;
3321 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3322 return op0;
3323 tem = simplify_associative_operation (code, mode, op0, op1);
3324 if (tem)
3325 return tem;
3326 break;
3328 case UMAX:
3329 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3330 return op1;
3331 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3332 return op0;
3333 tem = simplify_associative_operation (code, mode, op0, op1);
3334 if (tem)
3335 return tem;
3336 break;
3338 case SS_PLUS:
3339 case US_PLUS:
3340 case SS_MINUS:
3341 case US_MINUS:
3342 case SS_MULT:
3343 case US_MULT:
3344 case SS_DIV:
3345 case US_DIV:
3346 /* ??? There are simplifications that can be done. */
3347 return 0;
3349 case VEC_SELECT:
3350 if (!VECTOR_MODE_P (mode))
3352 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3353 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3354 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3355 gcc_assert (XVECLEN (trueop1, 0) == 1);
3356 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3358 if (GET_CODE (trueop0) == CONST_VECTOR)
3359 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3360 (trueop1, 0, 0)));
3362 /* Extract a scalar element from a nested VEC_SELECT expression
3363 (with optional nested VEC_CONCAT expression). Some targets
3364 (i386) extract a scalar element from a vector using a chain of
3365 nested VEC_SELECT expressions. When the input operand is a memory
3366 operand, this operation can be simplified to a simple scalar
3367 load from an offset memory address. */
3368 if (GET_CODE (trueop0) == VEC_SELECT)
3370 rtx op0 = XEXP (trueop0, 0);
3371 rtx op1 = XEXP (trueop0, 1);
3373 enum machine_mode opmode = GET_MODE (op0);
3374 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3375 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3377 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3378 int elem;
3380 rtvec vec;
3381 rtx tmp_op, tmp;
3383 gcc_assert (GET_CODE (op1) == PARALLEL);
3384 gcc_assert (i < n_elts);
3386 /* Select the element pointed to by the nested selector. */
3387 elem = INTVAL (XVECEXP (op1, 0, i));
3389 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3390 if (GET_CODE (op0) == VEC_CONCAT)
3392 rtx op00 = XEXP (op0, 0);
3393 rtx op01 = XEXP (op0, 1);
3395 enum machine_mode mode00, mode01;
3396 int n_elts00, n_elts01;
3398 mode00 = GET_MODE (op00);
3399 mode01 = GET_MODE (op01);
3401 /* Find out number of elements of each operand. */
3402 if (VECTOR_MODE_P (mode00))
3404 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3405 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3407 else
3408 n_elts00 = 1;
3410 if (VECTOR_MODE_P (mode01))
3412 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3413 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3415 else
3416 n_elts01 = 1;
3418 gcc_assert (n_elts == n_elts00 + n_elts01);
3420 /* Select correct operand of VEC_CONCAT
3421 and adjust selector. */
3422 if (elem < n_elts00)
3423 tmp_op = op00;
3424 else
3426 tmp_op = op01;
3427 elem -= n_elts00;
3430 else
3431 tmp_op = op0;
3433 vec = rtvec_alloc (1);
3434 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3436 tmp = gen_rtx_fmt_ee (code, mode,
3437 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3438 return tmp;
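/* For example, assuming A and B are V2SFmode,
   (vec_select:SF (vec_select:V2SF (vec_concat:V4SF A B)
                                   (parallel [2 3]))
                  (parallel [1]))
   selects element 1 of the inner selection, i.e. element 3 of the
   concatenation, which lives in B, so the code above rebuilds it as
   (vec_select:SF B (parallel [1])).  */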
3440 if (GET_CODE (trueop0) == VEC_DUPLICATE
3441 && GET_MODE (XEXP (trueop0, 0)) == mode)
3442 return XEXP (trueop0, 0);
3444 else
3446 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3447 gcc_assert (GET_MODE_INNER (mode)
3448 == GET_MODE_INNER (GET_MODE (trueop0)));
3449 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3451 if (GET_CODE (trueop0) == CONST_VECTOR)
3453 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3454 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3455 rtvec v = rtvec_alloc (n_elts);
3456 unsigned int i;
3458 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3459 for (i = 0; i < n_elts; i++)
3461 rtx x = XVECEXP (trueop1, 0, i);
3463 gcc_assert (CONST_INT_P (x));
3464 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3465 INTVAL (x));
3468 return gen_rtx_CONST_VECTOR (mode, v);
3471 /* Recognize the identity. */
3472 if (GET_MODE (trueop0) == mode)
3474 bool maybe_ident = true;
3475 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3477 rtx j = XVECEXP (trueop1, 0, i);
3478 if (!CONST_INT_P (j) || INTVAL (j) != i)
3480 maybe_ident = false;
3481 break;
3484 if (maybe_ident)
3485 return trueop0;
3488 /* If we build {a,b} then permute it, build the result directly. */
3489 if (XVECLEN (trueop1, 0) == 2
3490 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3491 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3492 && GET_CODE (trueop0) == VEC_CONCAT
3493 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3494 && GET_MODE (XEXP (trueop0, 0)) == mode
3495 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3496 && GET_MODE (XEXP (trueop0, 1)) == mode)
3498 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3499 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3500 rtx subop0, subop1;
3502 gcc_assert (i0 < 4 && i1 < 4);
3503 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3504 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3506 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3509 if (XVECLEN (trueop1, 0) == 2
3510 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3511 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3512 && GET_CODE (trueop0) == VEC_CONCAT
3513 && GET_MODE (trueop0) == mode)
3515 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3516 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3517 rtx subop0, subop1;
3519 gcc_assert (i0 < 2 && i1 < 2);
3520 subop0 = XEXP (trueop0, i0);
3521 subop1 = XEXP (trueop0, i1);
3523 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3527 if (XVECLEN (trueop1, 0) == 1
3528 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3529 && GET_CODE (trueop0) == VEC_CONCAT)
3531 rtx vec = trueop0;
3532 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3534 /* Try to find the element in the VEC_CONCAT. */
3535 while (GET_MODE (vec) != mode
3536 && GET_CODE (vec) == VEC_CONCAT)
3538 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3539 if (offset < vec_size)
3540 vec = XEXP (vec, 0);
3541 else
3543 offset -= vec_size;
3544 vec = XEXP (vec, 1);
3546 vec = avoid_constant_pool_reference (vec);
3549 if (GET_MODE (vec) == mode)
3550 return vec;
3553 return 0;
3554 case VEC_CONCAT:
3556 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3557 ? GET_MODE (trueop0)
3558 : GET_MODE_INNER (mode));
3559 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3560 ? GET_MODE (trueop1)
3561 : GET_MODE_INNER (mode));
3563 gcc_assert (VECTOR_MODE_P (mode));
3564 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3565 == GET_MODE_SIZE (mode));
3567 if (VECTOR_MODE_P (op0_mode))
3568 gcc_assert (GET_MODE_INNER (mode)
3569 == GET_MODE_INNER (op0_mode));
3570 else
3571 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3573 if (VECTOR_MODE_P (op1_mode))
3574 gcc_assert (GET_MODE_INNER (mode)
3575 == GET_MODE_INNER (op1_mode));
3576 else
3577 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3579 if ((GET_CODE (trueop0) == CONST_VECTOR
3580 || CONST_SCALAR_INT_P (trueop0)
3581 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3582 && (GET_CODE (trueop1) == CONST_VECTOR
3583 || CONST_SCALAR_INT_P (trueop1)
3584 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3586 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3587 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3588 rtvec v = rtvec_alloc (n_elts);
3589 unsigned int i;
3590 unsigned in_n_elts = 1;
3592 if (VECTOR_MODE_P (op0_mode))
3593 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3594 for (i = 0; i < n_elts; i++)
3596 if (i < in_n_elts)
3598 if (!VECTOR_MODE_P (op0_mode))
3599 RTVEC_ELT (v, i) = trueop0;
3600 else
3601 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3603 else
3605 if (!VECTOR_MODE_P (op1_mode))
3606 RTVEC_ELT (v, i) = trueop1;
3607 else
3608 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3609 i - in_n_elts);
3613 return gen_rtx_CONST_VECTOR (mode, v);
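/* When both operands are constants this builds the result vector
   directly; e.g. given (const_int 1) and (const_int 2) as the two
   scalar operands of a V2SImode VEC_CONCAT it produces the constant
   vector {1, 2}.  */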
3616 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3617 if (GET_CODE (trueop0) == VEC_SELECT
3618 && GET_CODE (trueop1) == VEC_SELECT
3619 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3621 rtx par0 = XEXP (trueop0, 1);
3622 rtx par1 = XEXP (trueop1, 1);
3623 int len0 = XVECLEN (par0, 0);
3624 int len1 = XVECLEN (par1, 0);
3625 rtvec vec = rtvec_alloc (len0 + len1);
3626 for (int i = 0; i < len0; i++)
3627 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3628 for (int i = 0; i < len1; i++)
3629 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3630 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3631 gen_rtx_PARALLEL (VOIDmode, vec));
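/* For example, (vec_concat (vec_select x (parallel [0]))
   (vec_select x (parallel [3]))) is merged into a single
   (vec_select x (parallel [0 3])) in the VEC_CONCAT's mode.  */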
3634 return 0;
3636 default:
3637 gcc_unreachable ();
3640 return 0;
3644 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3645 rtx op0, rtx op1)
3647 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3648 HOST_WIDE_INT val;
3649 unsigned int width = GET_MODE_PRECISION (mode);
3651 if (VECTOR_MODE_P (mode)
3652 && code != VEC_CONCAT
3653 && GET_CODE (op0) == CONST_VECTOR
3654 && GET_CODE (op1) == CONST_VECTOR)
3656 unsigned n_elts = GET_MODE_NUNITS (mode);
3657 enum machine_mode op0mode = GET_MODE (op0);
3658 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3659 enum machine_mode op1mode = GET_MODE (op1);
3660 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3661 rtvec v = rtvec_alloc (n_elts);
3662 unsigned int i;
3664 gcc_assert (op0_n_elts == n_elts);
3665 gcc_assert (op1_n_elts == n_elts);
3666 for (i = 0; i < n_elts; i++)
3668 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3669 CONST_VECTOR_ELT (op0, i),
3670 CONST_VECTOR_ELT (op1, i));
3671 if (!x)
3672 return 0;
3673 RTVEC_ELT (v, i) = x;
3676 return gen_rtx_CONST_VECTOR (mode, v);
3679 if (VECTOR_MODE_P (mode)
3680 && code == VEC_CONCAT
3681 && (CONST_SCALAR_INT_P (op0)
3682 || GET_CODE (op0) == CONST_FIXED
3683 || CONST_DOUBLE_AS_FLOAT_P (op0))
3684 && (CONST_SCALAR_INT_P (op1)
3685 || CONST_DOUBLE_AS_FLOAT_P (op1)
3686 || GET_CODE (op1) == CONST_FIXED))
3688 unsigned n_elts = GET_MODE_NUNITS (mode);
3689 rtvec v = rtvec_alloc (n_elts);
3691 gcc_assert (n_elts >= 2);
3692 if (n_elts == 2)
3694 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3695 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3697 RTVEC_ELT (v, 0) = op0;
3698 RTVEC_ELT (v, 1) = op1;
3700 else
3702 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3703 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3704 unsigned i;
3706 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3707 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3708 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3710 for (i = 0; i < op0_n_elts; ++i)
3711 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3712 for (i = 0; i < op1_n_elts; ++i)
3713 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3716 return gen_rtx_CONST_VECTOR (mode, v);
3719 if (SCALAR_FLOAT_MODE_P (mode)
3720 && CONST_DOUBLE_AS_FLOAT_P (op0)
3721 && CONST_DOUBLE_AS_FLOAT_P (op1)
3722 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3724 if (code == AND
3725 || code == IOR
3726 || code == XOR)
3728 long tmp0[4];
3729 long tmp1[4];
3730 REAL_VALUE_TYPE r;
3731 int i;
3733 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3734 GET_MODE (op0));
3735 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3736 GET_MODE (op1));
3737 for (i = 0; i < 4; i++)
3739 switch (code)
3741 case AND:
3742 tmp0[i] &= tmp1[i];
3743 break;
3744 case IOR:
3745 tmp0[i] |= tmp1[i];
3746 break;
3747 case XOR:
3748 tmp0[i] ^= tmp1[i];
3749 break;
3750 default:
3751 gcc_unreachable ();
3754 real_from_target (&r, tmp0, mode);
3755 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3757 else
3759 REAL_VALUE_TYPE f0, f1, value, result;
3760 bool inexact;
3762 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3763 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3764 real_convert (&f0, mode, &f0);
3765 real_convert (&f1, mode, &f1);
3767 if (HONOR_SNANS (mode)
3768 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3769 return 0;
3771 if (code == DIV
3772 && REAL_VALUES_EQUAL (f1, dconst0)
3773 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3774 return 0;
3776 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3777 && flag_trapping_math
3778 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3780 int s0 = REAL_VALUE_NEGATIVE (f0);
3781 int s1 = REAL_VALUE_NEGATIVE (f1);
3783 switch (code)
3785 case PLUS:
3786 /* Inf + -Inf = NaN plus exception. */
3787 if (s0 != s1)
3788 return 0;
3789 break;
3790 case MINUS:
3791 /* Inf - Inf = NaN plus exception. */
3792 if (s0 == s1)
3793 return 0;
3794 break;
3795 case DIV:
3796 /* Inf / Inf = NaN plus exception. */
3797 return 0;
3798 default:
3799 break;
3803 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3804 && flag_trapping_math
3805 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3806 || (REAL_VALUE_ISINF (f1)
3807 && REAL_VALUES_EQUAL (f0, dconst0))))
3808 /* Inf * 0 = NaN plus exception. */
3809 return 0;
3811 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3812 &f0, &f1);
3813 real_convert (&result, mode, &value);
3815 /* Don't constant fold this floating point operation if
3816 the result has overflowed and flag_trapping_math is set. */
3818 if (flag_trapping_math
3819 && MODE_HAS_INFINITIES (mode)
3820 && REAL_VALUE_ISINF (result)
3821 && !REAL_VALUE_ISINF (f0)
3822 && !REAL_VALUE_ISINF (f1))
3823 /* Overflow plus exception. */
3824 return 0;
3826 /* Don't constant fold this floating point operation if the
3827 result may depend upon the run-time rounding mode and
3828 flag_rounding_math is set, or if GCC's software emulation
3829 is unable to accurately represent the result. */
3831 if ((flag_rounding_math
3832 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3833 && (inexact || !real_identical (&result, &value)))
3834 return NULL_RTX;
3836 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3840 /* We can fold some multi-word operations. */
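   /* For instance, on a host whose HOST_WIDE_INT is 32 bits wide, a DImode
      PLUS of two integer constants is folded below with double_int
      arithmetic rather than being left unfolded.  */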
3841 if (GET_MODE_CLASS (mode) == MODE_INT
3842 && width == HOST_BITS_PER_DOUBLE_INT
3843 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3844 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3846 double_int o0, o1, res, tmp;
3847 bool overflow;
3849 o0 = rtx_to_double_int (op0);
3850 o1 = rtx_to_double_int (op1);
3852 switch (code)
3854 case MINUS:
3855 /* A - B == A + (-B). */
3856 o1 = -o1;
3858 /* Fall through.... */
3860 case PLUS:
3861 res = o0 + o1;
3862 break;
3864 case MULT:
3865 res = o0 * o1;
3866 break;
3868 case DIV:
3869 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3870 &tmp, &overflow);
3871 if (overflow)
3872 return 0;
3873 break;
3875 case MOD:
3876 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3877 &res, &overflow);
3878 if (overflow)
3879 return 0;
3880 break;
3882 case UDIV:
3883 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3884 &tmp, &overflow);
3885 if (overflow)
3886 return 0;
3887 break;
3889 case UMOD:
3890 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3891 &res, &overflow);
3892 if (overflow)
3893 return 0;
3894 break;
3896 case AND:
3897 res = o0 & o1;
3898 break;
3900 case IOR:
3901 res = o0 | o1;
3902 break;
3904 case XOR:
3905 res = o0 ^ o1;
3906 break;
3908 case SMIN:
3909 res = o0.smin (o1);
3910 break;
3912 case SMAX:
3913 res = o0.smax (o1);
3914 break;
3916 case UMIN:
3917 res = o0.umin (o1);
3918 break;
3920 case UMAX:
3921 res = o0.umax (o1);
3922 break;
3924 case LSHIFTRT: case ASHIFTRT:
3925 case ASHIFT:
3926 case ROTATE: case ROTATERT:
3928 unsigned HOST_WIDE_INT cnt;
3930 if (SHIFT_COUNT_TRUNCATED)
3932 o1.high = 0;
3933 o1.low &= GET_MODE_PRECISION (mode) - 1;
3936 if (!o1.fits_uhwi ()
3937 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3938 return 0;
3940 cnt = o1.to_uhwi ();
3941 unsigned short prec = GET_MODE_PRECISION (mode);
3943 if (code == LSHIFTRT || code == ASHIFTRT)
3944 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3945 else if (code == ASHIFT)
3946 res = o0.alshift (cnt, prec);
3947 else if (code == ROTATE)
3948 res = o0.lrotate (cnt, prec);
3949 else /* code == ROTATERT */
3950 res = o0.rrotate (cnt, prec);
3952 break;
3954 default:
3955 return 0;
3958 return immed_double_int_const (res, mode);
3961 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3962 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3964 /* Get the integer argument values in two forms:
3965 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
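   /* For instance, for QImode (const_int -1) the zero-extended form is
      ARG0 = 0xff (255), while the sign-extended form is ARG0S = -1.  */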
3967 arg0 = INTVAL (op0);
3968 arg1 = INTVAL (op1);
3970 if (width < HOST_BITS_PER_WIDE_INT)
3972 arg0 &= GET_MODE_MASK (mode);
3973 arg1 &= GET_MODE_MASK (mode);
3975 arg0s = arg0;
3976 if (val_signbit_known_set_p (mode, arg0s))
3977 arg0s |= ~GET_MODE_MASK (mode);
3979 arg1s = arg1;
3980 if (val_signbit_known_set_p (mode, arg1s))
3981 arg1s |= ~GET_MODE_MASK (mode);
3983 else
3985 arg0s = arg0;
3986 arg1s = arg1;
3989 /* Compute the value of the arithmetic. */
3991 switch (code)
3993 case PLUS:
3994 val = arg0s + arg1s;
3995 break;
3997 case MINUS:
3998 val = arg0s - arg1s;
3999 break;
4001 case MULT:
4002 val = arg0s * arg1s;
4003 break;
4005 case DIV:
4006 if (arg1s == 0
4007 || ((unsigned HOST_WIDE_INT) arg0s
4008 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4009 && arg1s == -1))
4010 return 0;
4011 val = arg0s / arg1s;
4012 break;
4014 case MOD:
4015 if (arg1s == 0
4016 || ((unsigned HOST_WIDE_INT) arg0s
4017 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4018 && arg1s == -1))
4019 return 0;
4020 val = arg0s % arg1s;
4021 break;
4023 case UDIV:
4024 if (arg1 == 0
4025 || ((unsigned HOST_WIDE_INT) arg0s
4026 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4027 && arg1s == -1))
4028 return 0;
4029 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4030 break;
4032 case UMOD:
4033 if (arg1 == 0
4034 || ((unsigned HOST_WIDE_INT) arg0s
4035 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4036 && arg1s == -1))
4037 return 0;
4038 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4039 break;
4041 case AND:
4042 val = arg0 & arg1;
4043 break;
4045 case IOR:
4046 val = arg0 | arg1;
4047 break;
4049 case XOR:
4050 val = arg0 ^ arg1;
4051 break;
4053 case LSHIFTRT:
4054 case ASHIFT:
4055 case ASHIFTRT:
4056 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4057 the value is in range. We can't return any old value for
4058 out-of-range arguments because either the middle-end (via
4059 shift_truncation_mask) or the back-end might be relying on
4060 target-specific knowledge. Nor can we rely on
4061 shift_truncation_mask, since the shift might not be part of an
4062 ashlM3, lshrM3 or ashrM3 instruction. */
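	 /* For instance, in a 32-bit mode a shift count of 33 is reduced to
	    1 (33 % 32) when SHIFT_COUNT_TRUNCATED is nonzero; otherwise the
	    fold is refused and 0 is returned.  */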
4063 if (SHIFT_COUNT_TRUNCATED)
4064 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4065 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4066 return 0;
4068 val = (code == ASHIFT
4069 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4070 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4072 /* Sign-extend the result for arithmetic right shifts. */
4073 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4074 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4075 break;
4077 case ROTATERT:
4078 if (arg1 < 0)
4079 return 0;
4081 arg1 %= width;
4082 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4083 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4084 break;
4086 case ROTATE:
4087 if (arg1 < 0)
4088 return 0;
4090 arg1 %= width;
4091 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4092 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4093 break;
4095 case COMPARE:
4096 /* Do nothing here. */
4097 return 0;
4099 case SMIN:
4100 val = arg0s <= arg1s ? arg0s : arg1s;
4101 break;
4103 case UMIN:
4104 val = ((unsigned HOST_WIDE_INT) arg0
4105 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4106 break;
4108 case SMAX:
4109 val = arg0s > arg1s ? arg0s : arg1s;
4110 break;
4112 case UMAX:
4113 val = ((unsigned HOST_WIDE_INT) arg0
4114 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4115 break;
4117 case SS_PLUS:
4118 case US_PLUS:
4119 case SS_MINUS:
4120 case US_MINUS:
4121 case SS_MULT:
4122 case US_MULT:
4123 case SS_DIV:
4124 case US_DIV:
4125 case SS_ASHIFT:
4126 case US_ASHIFT:
4127 /* ??? There are simplifications that can be done. */
4128 return 0;
4130 default:
4131 gcc_unreachable ();
4134 return gen_int_mode (val, mode);
4137 return NULL_RTX;
4142 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4143 PLUS or MINUS.
4145 Rather than test for specific cases, we do this by a brute-force method
4146 and do all possible simplifications until no more changes occur. Then
4147 we rebuild the operation. */
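/* For instance (roughly), (plus (plus a b) (neg a)) is decomposed into the
   signed operand list { +a, +b, -a }; the pairwise pass below cancels +a
   against -a, and the remaining list is rebuilt as just "b".  */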
4149 struct simplify_plus_minus_op_data
4151 rtx op;
4152 short neg;
4155 static bool
4156 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4158 int result;
4160 result = (commutative_operand_precedence (y)
4161 - commutative_operand_precedence (x));
4162 if (result)
4163 return result > 0;
4165 /* Group together equal REGs to do more simplification. */
4166 if (REG_P (x) && REG_P (y))
4167 return REGNO (x) > REGNO (y);
4168 else
4169 return false;
4172 static rtx
4173 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4174 rtx op1)
4176 struct simplify_plus_minus_op_data ops[8];
4177 rtx result, tem;
4178 int n_ops = 2, input_ops = 2;
4179 int changed, n_constants = 0, canonicalized = 0;
4180 int i, j;
4182 memset (ops, 0, sizeof ops);
4184 /* Set up the two operands and then expand them until nothing has been
4185 changed. If we run out of room in our array, give up; this should
4186 almost never happen. */
4188 ops[0].op = op0;
4189 ops[0].neg = 0;
4190 ops[1].op = op1;
4191 ops[1].neg = (code == MINUS);
4195 changed = 0;
4197 for (i = 0; i < n_ops; i++)
4199 rtx this_op = ops[i].op;
4200 int this_neg = ops[i].neg;
4201 enum rtx_code this_code = GET_CODE (this_op);
4203 switch (this_code)
4205 case PLUS:
4206 case MINUS:
4207 if (n_ops == 7)
4208 return NULL_RTX;
4210 ops[n_ops].op = XEXP (this_op, 1);
4211 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4212 n_ops++;
4214 ops[i].op = XEXP (this_op, 0);
4215 input_ops++;
4216 changed = 1;
4217 canonicalized |= this_neg;
4218 break;
4220 case NEG:
4221 ops[i].op = XEXP (this_op, 0);
4222 ops[i].neg = ! this_neg;
4223 changed = 1;
4224 canonicalized = 1;
4225 break;
4227 case CONST:
4228 if (n_ops < 7
4229 && GET_CODE (XEXP (this_op, 0)) == PLUS
4230 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4231 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4233 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4234 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4235 ops[n_ops].neg = this_neg;
4236 n_ops++;
4237 changed = 1;
4238 canonicalized = 1;
4240 break;
4242 case NOT:
4243 /* ~a -> (-a - 1) */
4244 if (n_ops != 7)
4246 ops[n_ops].op = CONSTM1_RTX (mode);
4247 ops[n_ops++].neg = this_neg;
4248 ops[i].op = XEXP (this_op, 0);
4249 ops[i].neg = !this_neg;
4250 changed = 1;
4251 canonicalized = 1;
4253 break;
4255 case CONST_INT:
4256 n_constants++;
4257 if (this_neg)
4259 ops[i].op = neg_const_int (mode, this_op);
4260 ops[i].neg = 0;
4261 changed = 1;
4262 canonicalized = 1;
4264 break;
4266 default:
4267 break;
4271 while (changed);
4273 if (n_constants > 1)
4274 canonicalized = 1;
4276 gcc_assert (n_ops >= 2);
4278 /* If we only have two operands, we can avoid the loops. */
4279 if (n_ops == 2)
4281 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4282 rtx lhs, rhs;
4284 /* Get the two operands. Be careful with the order, especially for
4285 the cases where code == MINUS. */
4286 if (ops[0].neg && ops[1].neg)
4288 lhs = gen_rtx_NEG (mode, ops[0].op);
4289 rhs = ops[1].op;
4291 else if (ops[0].neg)
4293 lhs = ops[1].op;
4294 rhs = ops[0].op;
4296 else
4298 lhs = ops[0].op;
4299 rhs = ops[1].op;
4302 return simplify_const_binary_operation (code, mode, lhs, rhs);
4305 /* Now simplify each pair of operands until nothing changes. */
4308 /* Insertion sort is good enough for an eight-element array. */
4309 for (i = 1; i < n_ops; i++)
4311 struct simplify_plus_minus_op_data save;
4312 j = i - 1;
4313 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4314 continue;
4316 canonicalized = 1;
4317 save = ops[i];
4319 ops[j + 1] = ops[j];
4320 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4321 ops[j + 1] = save;
4324 changed = 0;
4325 for (i = n_ops - 1; i > 0; i--)
4326 for (j = i - 1; j >= 0; j--)
4328 rtx lhs = ops[j].op, rhs = ops[i].op;
4329 int lneg = ops[j].neg, rneg = ops[i].neg;
4331 if (lhs != 0 && rhs != 0)
4333 enum rtx_code ncode = PLUS;
4335 if (lneg != rneg)
4337 ncode = MINUS;
4338 if (lneg)
4339 tem = lhs, lhs = rhs, rhs = tem;
4341 else if (swap_commutative_operands_p (lhs, rhs))
4342 tem = lhs, lhs = rhs, rhs = tem;
4344 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4345 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4347 rtx tem_lhs, tem_rhs;
4349 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4350 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4351 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4353 if (tem && !CONSTANT_P (tem))
4354 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4356 else
4357 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4359 /* Reject "simplifications" that just wrap the two
4360 arguments in a CONST. Failure to do so can result
4361 in infinite recursion with simplify_binary_operation
4362 when it calls us to simplify CONST operations. */
4363 if (tem
4364 && ! (GET_CODE (tem) == CONST
4365 && GET_CODE (XEXP (tem, 0)) == ncode
4366 && XEXP (XEXP (tem, 0), 0) == lhs
4367 && XEXP (XEXP (tem, 0), 1) == rhs))
4369 lneg &= rneg;
4370 if (GET_CODE (tem) == NEG)
4371 tem = XEXP (tem, 0), lneg = !lneg;
4372 if (CONST_INT_P (tem) && lneg)
4373 tem = neg_const_int (mode, tem), lneg = 0;
4375 ops[i].op = tem;
4376 ops[i].neg = lneg;
4377 ops[j].op = NULL_RTX;
4378 changed = 1;
4379 canonicalized = 1;
4384 /* If nothing changed, fail. */
4385 if (!canonicalized)
4386 return NULL_RTX;
4388 /* Pack all the operands to the lower-numbered entries. */
4389 for (i = 0, j = 0; j < n_ops; j++)
4390 if (ops[j].op)
4392 ops[i] = ops[j];
4393 i++;
4395 n_ops = i;
4397 while (changed);
4399 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4400 if (n_ops == 2
4401 && CONST_INT_P (ops[1].op)
4402 && CONSTANT_P (ops[0].op)
4403 && ops[0].neg)
4404 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4406 /* We suppressed creation of trivial CONST expressions in the
4407 combination loop to avoid recursion. Create one manually now.
4408 The combination loop should have ensured that there is exactly
4409 one CONST_INT, and the sort will have ensured that it is last
4410 in the array and that any other constant will be next-to-last. */
4412 if (n_ops > 1
4413 && CONST_INT_P (ops[n_ops - 1].op)
4414 && CONSTANT_P (ops[n_ops - 2].op))
4416 rtx value = ops[n_ops - 1].op;
4417 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4418 value = neg_const_int (mode, value);
4419 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4420 INTVAL (value));
4421 n_ops--;
4424 /* Put a non-negated operand first, if possible. */
4426 for (i = 0; i < n_ops && ops[i].neg; i++)
4427 continue;
4428 if (i == n_ops)
4429 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4430 else if (i != 0)
4432 tem = ops[0].op;
4433 ops[0] = ops[i];
4434 ops[i].op = tem;
4435 ops[i].neg = 1;
4438 /* Now make the result by performing the requested operations. */
4439 result = ops[0].op;
4440 for (i = 1; i < n_ops; i++)
4441 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4442 mode, result, ops[i].op);
4444 return result;
4447 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4448 static bool
4449 plus_minus_operand_p (const_rtx x)
4451 return GET_CODE (x) == PLUS
4452 || GET_CODE (x) == MINUS
4453 || (GET_CODE (x) == CONST
4454 && GET_CODE (XEXP (x, 0)) == PLUS
4455 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4456 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4459 /* Like simplify_binary_operation except used for relational operators.
4460 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4461 not also be VOIDmode.
4463 CMP_MODE specifies the mode in which the comparison is done, so it is
4464 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4465 the operands or, if both are VOIDmode, the operands are compared in
4466 "infinite precision". */
4468 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4469 enum machine_mode cmp_mode, rtx op0, rtx op1)
4471 rtx tem, trueop0, trueop1;
4473 if (cmp_mode == VOIDmode)
4474 cmp_mode = GET_MODE (op0);
4475 if (cmp_mode == VOIDmode)
4476 cmp_mode = GET_MODE (op1);
4478 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4479 if (tem)
4481 if (SCALAR_FLOAT_MODE_P (mode))
4483 if (tem == const0_rtx)
4484 return CONST0_RTX (mode);
4485 #ifdef FLOAT_STORE_FLAG_VALUE
4487 REAL_VALUE_TYPE val;
4488 val = FLOAT_STORE_FLAG_VALUE (mode);
4489 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4491 #else
4492 return NULL_RTX;
4493 #endif
4495 if (VECTOR_MODE_P (mode))
4497 if (tem == const0_rtx)
4498 return CONST0_RTX (mode);
4499 #ifdef VECTOR_STORE_FLAG_VALUE
4501 int i, units;
4502 rtvec v;
4504 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4505 if (val == NULL_RTX)
4506 return NULL_RTX;
4507 if (val == const1_rtx)
4508 return CONST1_RTX (mode);
4510 units = GET_MODE_NUNITS (mode);
4511 v = rtvec_alloc (units);
4512 for (i = 0; i < units; i++)
4513 RTVEC_ELT (v, i) = val;
4514 return gen_rtx_raw_CONST_VECTOR (mode, v);
4516 #else
4517 return NULL_RTX;
4518 #endif
4521 return tem;
4524 /* For the following tests, ensure const0_rtx is op1. */
4525 if (swap_commutative_operands_p (op0, op1)
4526 || (op0 == const0_rtx && op1 != const0_rtx))
4527 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4529 /* If op0 is a compare, extract the comparison arguments from it. */
4530 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4531 return simplify_gen_relational (code, mode, VOIDmode,
4532 XEXP (op0, 0), XEXP (op0, 1));
4534 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4535 || CC0_P (op0))
4536 return NULL_RTX;
4538 trueop0 = avoid_constant_pool_reference (op0);
4539 trueop1 = avoid_constant_pool_reference (op1);
4540 return simplify_relational_operation_1 (code, mode, cmp_mode,
4541 trueop0, trueop1);
4544 /* This part of simplify_relational_operation is only used when CMP_MODE
4545 is not in class MODE_CC (i.e. it is a real comparison).
4547 MODE is the mode of the result, while CMP_MODE specifies the mode
4548 in which the comparison is done, so it is the mode of the operands. */
4550 static rtx
4551 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4552 enum machine_mode cmp_mode, rtx op0, rtx op1)
4554 enum rtx_code op0code = GET_CODE (op0);
4556 if (op1 == const0_rtx && COMPARISON_P (op0))
4558 /* If op0 is a comparison, extract the comparison arguments
4559 from it. */
4560 if (code == NE)
4562 if (GET_MODE (op0) == mode)
4563 return simplify_rtx (op0);
4564 else
4565 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4566 XEXP (op0, 0), XEXP (op0, 1));
4568 else if (code == EQ)
4570 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4571 if (new_code != UNKNOWN)
4572 return simplify_gen_relational (new_code, mode, VOIDmode,
4573 XEXP (op0, 0), XEXP (op0, 1));
4577 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4578 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
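  /* For instance, (ltu (plus a 5) 5), which tests whether the unsigned
     addition a + 5 wrapped around, becomes (geu a -5).  */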
4579 if ((code == LTU || code == GEU)
4580 && GET_CODE (op0) == PLUS
4581 && CONST_INT_P (XEXP (op0, 1))
4582 && (rtx_equal_p (op1, XEXP (op0, 0))
4583 || rtx_equal_p (op1, XEXP (op0, 1)))
4584 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4585 && XEXP (op0, 1) != const0_rtx)
4587 rtx new_cmp
4588 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4589 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4590 cmp_mode, XEXP (op0, 0), new_cmp);
4593 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4594 if ((code == LTU || code == GEU)
4595 && GET_CODE (op0) == PLUS
4596 && rtx_equal_p (op1, XEXP (op0, 1))
4597 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4598 && !rtx_equal_p (op1, XEXP (op0, 0)))
4599 return simplify_gen_relational (code, mode, cmp_mode, op0,
4600 copy_rtx (XEXP (op0, 0)));
4602 if (op1 == const0_rtx)
4604 /* Canonicalize (GTU x 0) as (NE x 0). */
4605 if (code == GTU)
4606 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4607 /* Canonicalize (LEU x 0) as (EQ x 0). */
4608 if (code == LEU)
4609 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4611 else if (op1 == const1_rtx)
4613 switch (code)
4615 case GE:
4616 /* Canonicalize (GE x 1) as (GT x 0). */
4617 return simplify_gen_relational (GT, mode, cmp_mode,
4618 op0, const0_rtx);
4619 case GEU:
4620 /* Canonicalize (GEU x 1) as (NE x 0). */
4621 return simplify_gen_relational (NE, mode, cmp_mode,
4622 op0, const0_rtx);
4623 case LT:
4624 /* Canonicalize (LT x 1) as (LE x 0). */
4625 return simplify_gen_relational (LE, mode, cmp_mode,
4626 op0, const0_rtx);
4627 case LTU:
4628 /* Canonicalize (LTU x 1) as (EQ x 0). */
4629 return simplify_gen_relational (EQ, mode, cmp_mode,
4630 op0, const0_rtx);
4631 default:
4632 break;
4635 else if (op1 == constm1_rtx)
4637 /* Canonicalize (LE x -1) as (LT x 0). */
4638 if (code == LE)
4639 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4640 /* Canonicalize (GT x -1) as (GE x 0). */
4641 if (code == GT)
4642 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4645 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4646 if ((code == EQ || code == NE)
4647 && (op0code == PLUS || op0code == MINUS)
4648 && CONSTANT_P (op1)
4649 && CONSTANT_P (XEXP (op0, 1))
4650 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4652 rtx x = XEXP (op0, 0);
4653 rtx c = XEXP (op0, 1);
4654 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4655 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4657 /* Detect an infinitely recursive condition, where this simplification
4658 would oscillate between:
4659 A + B == C <---> C - B == A,
4660 where A, B, and C are all constants with non-simplifiable expressions,
4661 usually SYMBOL_REFs. */
4662 if (GET_CODE (tem) == invcode
4663 && CONSTANT_P (x)
4664 && rtx_equal_p (c, XEXP (tem, 1)))
4665 return NULL_RTX;
4667 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4670 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4671 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4672 if (code == NE
4673 && op1 == const0_rtx
4674 && GET_MODE_CLASS (mode) == MODE_INT
4675 && cmp_mode != VOIDmode
4676 /* ??? Work-around BImode bugs in the ia64 backend. */
4677 && mode != BImode
4678 && cmp_mode != BImode
4679 && nonzero_bits (op0, cmp_mode) == 1
4680 && STORE_FLAG_VALUE == 1)
4681 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4682 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4683 : lowpart_subreg (mode, op0, cmp_mode);
4685 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4686 if ((code == EQ || code == NE)
4687 && op1 == const0_rtx
4688 && op0code == XOR)
4689 return simplify_gen_relational (code, mode, cmp_mode,
4690 XEXP (op0, 0), XEXP (op0, 1));
4692 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4693 if ((code == EQ || code == NE)
4694 && op0code == XOR
4695 && rtx_equal_p (XEXP (op0, 0), op1)
4696 && !side_effects_p (XEXP (op0, 0)))
4697 return simplify_gen_relational (code, mode, cmp_mode,
4698 XEXP (op0, 1), const0_rtx);
4700 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4701 if ((code == EQ || code == NE)
4702 && op0code == XOR
4703 && rtx_equal_p (XEXP (op0, 1), op1)
4704 && !side_effects_p (XEXP (op0, 1)))
4705 return simplify_gen_relational (code, mode, cmp_mode,
4706 XEXP (op0, 0), const0_rtx);
4708 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4709 if ((code == EQ || code == NE)
4710 && op0code == XOR
4711 && CONST_SCALAR_INT_P (op1)
4712 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4713 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4714 simplify_gen_binary (XOR, cmp_mode,
4715 XEXP (op0, 1), op1));
4717 if (op0code == POPCOUNT && op1 == const0_rtx)
4718 switch (code)
4720 case EQ:
4721 case LE:
4722 case LEU:
4723 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4724 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4725 XEXP (op0, 0), const0_rtx);
4727 case NE:
4728 case GT:
4729 case GTU:
4730 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4731 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4732 XEXP (op0, 0), const0_rtx);
4734 default:
4735 break;
4738 return NULL_RTX;
4741 enum
4743 CMP_EQ = 1,
4744 CMP_LT = 2,
4745 CMP_GT = 4,
4746 CMP_LTU = 8,
4747 CMP_GTU = 16
4751 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4752 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4753 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4754 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4755 For floating-point comparisons, assume that the operands were ordered. */
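/* For instance, comparison_result (LE, CMP_LT | CMP_LTU) yields
   const_true_rtx, while comparison_result (GT, CMP_LT | CMP_LTU) yields
   const0_rtx.  */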
4757 static rtx
4758 comparison_result (enum rtx_code code, int known_results)
4760 switch (code)
4762 case EQ:
4763 case UNEQ:
4764 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4765 case NE:
4766 case LTGT:
4767 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4769 case LT:
4770 case UNLT:
4771 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4772 case GE:
4773 case UNGE:
4774 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4776 case GT:
4777 case UNGT:
4778 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4779 case LE:
4780 case UNLE:
4781 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4783 case LTU:
4784 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4785 case GEU:
4786 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4788 case GTU:
4789 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4790 case LEU:
4791 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4793 case ORDERED:
4794 return const_true_rtx;
4795 case UNORDERED:
4796 return const0_rtx;
4797 default:
4798 gcc_unreachable ();
4802 /* Check if the given comparison (done in the given MODE) is actually a
4803 tautology or a contradiction.
4804 If no simplification is possible, this function returns zero.
4805 Otherwise, it returns either const_true_rtx or const0_rtx. */
4808 simplify_const_relational_operation (enum rtx_code code,
4809 enum machine_mode mode,
4810 rtx op0, rtx op1)
4812 rtx tem;
4813 rtx trueop0;
4814 rtx trueop1;
4816 gcc_assert (mode != VOIDmode
4817 || (GET_MODE (op0) == VOIDmode
4818 && GET_MODE (op1) == VOIDmode));
4820 /* If op0 is a compare, extract the comparison arguments from it. */
4821 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4823 op1 = XEXP (op0, 1);
4824 op0 = XEXP (op0, 0);
4826 if (GET_MODE (op0) != VOIDmode)
4827 mode = GET_MODE (op0);
4828 else if (GET_MODE (op1) != VOIDmode)
4829 mode = GET_MODE (op1);
4830 else
4831 return 0;
4834 /* We can't simplify MODE_CC values since we don't know what the
4835 actual comparison is. */
4836 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4837 return 0;
4839 /* Make sure the constant is second. */
4840 if (swap_commutative_operands_p (op0, op1))
4842 tem = op0, op0 = op1, op1 = tem;
4843 code = swap_condition (code);
4846 trueop0 = avoid_constant_pool_reference (op0);
4847 trueop1 = avoid_constant_pool_reference (op1);
4849 /* For integer comparisons of A and B maybe we can simplify A - B and can
4850 then simplify a comparison of that with zero. If A and B are both either
4851 a register or a CONST_INT, this can't help; testing for these cases will
4852 prevent infinite recursion here and speed things up.
4854 We can only do this for EQ and NE comparisons, as otherwise we may
4855 lose or introduce overflow, which we cannot disregard as undefined
4856 because we do not know the signedness of the operation on either the
4857 left- or the right-hand side of the comparison. */
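  /* For instance, (eq (plus x 1) (plus x 2)) folds to const0_rtx here:
     the MINUS of the two operands simplifies to (const_int -1), and
     comparing that with zero for equality is known to be false.  */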
4859 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4860 && (code == EQ || code == NE)
4861 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4862 && (REG_P (op1) || CONST_INT_P (trueop1)))
4863 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4864 /* We cannot do this if tem is a nonzero address. */
4865 && ! nonzero_address_p (tem))
4866 return simplify_const_relational_operation (signed_condition (code),
4867 mode, tem, const0_rtx);
4869 if (! HONOR_NANS (mode) && code == ORDERED)
4870 return const_true_rtx;
4872 if (! HONOR_NANS (mode) && code == UNORDERED)
4873 return const0_rtx;
4875 /* For modes without NaNs, if the two operands are equal, we know the
4876 result except if they have side-effects. Even with NaNs we know
4877 the result of unordered comparisons and, if signaling NaNs are
4878 irrelevant, also the result of LT/GT/LTGT. */
4879 if ((! HONOR_NANS (GET_MODE (trueop0))
4880 || code == UNEQ || code == UNLE || code == UNGE
4881 || ((code == LT || code == GT || code == LTGT)
4882 && ! HONOR_SNANS (GET_MODE (trueop0))))
4883 && rtx_equal_p (trueop0, trueop1)
4884 && ! side_effects_p (trueop0))
4885 return comparison_result (code, CMP_EQ);
4887 /* If the operands are floating-point constants, see if we can fold
4888 the result. */
4889 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4890 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4891 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4893 REAL_VALUE_TYPE d0, d1;
4895 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4896 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4898 /* Comparisons are unordered iff at least one of the values is NaN. */
4899 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4900 switch (code)
4902 case UNEQ:
4903 case UNLT:
4904 case UNGT:
4905 case UNLE:
4906 case UNGE:
4907 case NE:
4908 case UNORDERED:
4909 return const_true_rtx;
4910 case EQ:
4911 case LT:
4912 case GT:
4913 case LE:
4914 case GE:
4915 case LTGT:
4916 case ORDERED:
4917 return const0_rtx;
4918 default:
4919 return 0;
4922 return comparison_result (code,
4923 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4924 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4927 /* Otherwise, see if the operands are both integers. */
4928 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4929 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4930 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4932 int width = GET_MODE_PRECISION (mode);
4933 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4934 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4936 /* Get the two words comprising each integer constant. */
4937 if (CONST_DOUBLE_AS_INT_P (trueop0))
4939 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4940 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4942 else
4944 l0u = l0s = INTVAL (trueop0);
4945 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4948 if (CONST_DOUBLE_AS_INT_P (trueop1))
4950 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4951 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4953 else
4955 l1u = l1s = INTVAL (trueop1);
4956 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4959 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4960 we have to sign or zero-extend the values. */
4961 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4963 l0u &= GET_MODE_MASK (mode);
4964 l1u &= GET_MODE_MASK (mode);
4966 if (val_signbit_known_set_p (mode, l0s))
4967 l0s |= ~GET_MODE_MASK (mode);
4969 if (val_signbit_known_set_p (mode, l1s))
4970 l1s |= ~GET_MODE_MASK (mode);
4972 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4973 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4975 if (h0u == h1u && l0u == l1u)
4976 return comparison_result (code, CMP_EQ);
4977 else
4979 int cr;
4980 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4981 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4982 return comparison_result (code, cr);
4986 /* Optimize comparisons with upper and lower bounds. */
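  /* For instance, if nonzero_bits shows that op0 fits in 8 bits (so the
     range is [0, 255]), then (gtu op0 (const_int 255)) folds to const0_rtx
     and (leu op0 (const_int 255)) folds to const_true_rtx.  */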
4987 if (HWI_COMPUTABLE_MODE_P (mode)
4988 && CONST_INT_P (trueop1))
4990 int sign;
4991 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4992 HOST_WIDE_INT val = INTVAL (trueop1);
4993 HOST_WIDE_INT mmin, mmax;
4995 if (code == GEU
4996 || code == LEU
4997 || code == GTU
4998 || code == LTU)
4999 sign = 0;
5000 else
5001 sign = 1;
5003 /* Get a reduced range if the sign bit is zero. */
5004 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5006 mmin = 0;
5007 mmax = nonzero;
5009 else
5011 rtx mmin_rtx, mmax_rtx;
5012 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5014 mmin = INTVAL (mmin_rtx);
5015 mmax = INTVAL (mmax_rtx);
5016 if (sign)
5018 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5020 mmin >>= (sign_copies - 1);
5021 mmax >>= (sign_copies - 1);
5025 switch (code)
5027 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5028 case GEU:
5029 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5030 return const_true_rtx;
5031 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5032 return const0_rtx;
5033 break;
5034 case GE:
5035 if (val <= mmin)
5036 return const_true_rtx;
5037 if (val > mmax)
5038 return const0_rtx;
5039 break;
5041 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5042 case LEU:
5043 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5044 return const_true_rtx;
5045 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5046 return const0_rtx;
5047 break;
5048 case LE:
5049 if (val >= mmax)
5050 return const_true_rtx;
5051 if (val < mmin)
5052 return const0_rtx;
5053 break;
5055 case EQ:
5056 /* x == y is always false for y out of range. */
5057 if (val < mmin || val > mmax)
5058 return const0_rtx;
5059 break;
5061 /* x > y is always false for y >= mmax, always true for y < mmin. */
5062 case GTU:
5063 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5064 return const0_rtx;
5065 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5066 return const_true_rtx;
5067 break;
5068 case GT:
5069 if (val >= mmax)
5070 return const0_rtx;
5071 if (val < mmin)
5072 return const_true_rtx;
5073 break;
5075 /* x < y is always false for y <= mmin, always true for y > mmax. */
5076 case LTU:
5077 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5078 return const0_rtx;
5079 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5080 return const_true_rtx;
5081 break;
5082 case LT:
5083 if (val <= mmin)
5084 return const0_rtx;
5085 if (val > mmax)
5086 return const_true_rtx;
5087 break;
5089 case NE:
5090 /* x != y is always true for y out of range. */
5091 if (val < mmin || val > mmax)
5092 return const_true_rtx;
5093 break;
5095 default:
5096 break;
5100 /* Optimize integer comparisons with zero. */
5101 if (trueop1 == const0_rtx)
5103 /* Some addresses are known to be nonzero. We don't know
5104 their sign, but equality comparisons are known. */
5105 if (nonzero_address_p (trueop0))
5107 if (code == EQ || code == LEU)
5108 return const0_rtx;
5109 if (code == NE || code == GTU)
5110 return const_true_rtx;
5113 /* See if the first operand is an IOR with a constant. If so, we
5114 may be able to determine the result of this comparison. */
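      /* For instance, (ior x (const_int 4)) is known to be nonzero, so an
	 EQ of it against zero folds to const0_rtx and an NE folds to
	 const_true_rtx.  */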
5115 if (GET_CODE (op0) == IOR)
5117 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5118 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5120 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5121 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5122 && (UINTVAL (inner_const)
5123 & ((unsigned HOST_WIDE_INT) 1
5124 << sign_bitnum)));
5126 switch (code)
5128 case EQ:
5129 case LEU:
5130 return const0_rtx;
5131 case NE:
5132 case GTU:
5133 return const_true_rtx;
5134 case LT:
5135 case LE:
5136 if (has_sign)
5137 return const_true_rtx;
5138 break;
5139 case GT:
5140 case GE:
5141 if (has_sign)
5142 return const0_rtx;
5143 break;
5144 default:
5145 break;
5151 /* Optimize comparison of ABS with zero. */
5152 if (trueop1 == CONST0_RTX (mode)
5153 && (GET_CODE (trueop0) == ABS
5154 || (GET_CODE (trueop0) == FLOAT_EXTEND
5155 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5157 switch (code)
5159 case LT:
5160 /* Optimize abs(x) < 0.0. */
5161 if (!HONOR_SNANS (mode)
5162 && (!INTEGRAL_MODE_P (mode)
5163 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5165 if (INTEGRAL_MODE_P (mode)
5166 && (issue_strict_overflow_warning
5167 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5168 warning (OPT_Wstrict_overflow,
5169 ("assuming signed overflow does not occur when "
5170 "assuming abs (x) < 0 is false"));
5171 return const0_rtx;
5173 break;
5175 case GE:
5176 /* Optimize abs(x) >= 0.0. */
5177 if (!HONOR_NANS (mode)
5178 && (!INTEGRAL_MODE_P (mode)
5179 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5181 if (INTEGRAL_MODE_P (mode)
5182 && (issue_strict_overflow_warning
5183 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5184 warning (OPT_Wstrict_overflow,
5185 ("assuming signed overflow does not occur when "
5186 "assuming abs (x) >= 0 is true"));
5187 return const_true_rtx;
5189 break;
5191 case UNGE:
5192 /* Optimize ! (abs(x) < 0.0). */
5193 return const_true_rtx;
5195 default:
5196 break;
5200 return 0;
5203 /* Simplify CODE, an operation with result mode MODE and three operands,
5204 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5205 a constant. Return 0 if no simplification is possible. */
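/* For instance, (if_then_else (const_int 1) a b) simplifies to A, and an
   FMA whose two multiplication operands are both negated has the two
   negations stripped.  */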
5208 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5209 enum machine_mode op0_mode, rtx op0, rtx op1,
5210 rtx op2)
5212 unsigned int width = GET_MODE_PRECISION (mode);
5213 bool any_change = false;
5214 rtx tem;
5216 /* VOIDmode means "infinite" precision. */
5217 if (width == 0)
5218 width = HOST_BITS_PER_WIDE_INT;
5220 switch (code)
5222 case FMA:
5223 /* Simplify negations around the multiplication. */
5224 /* -a * -b + c => a * b + c. */
5225 if (GET_CODE (op0) == NEG)
5227 tem = simplify_unary_operation (NEG, mode, op1, mode);
5228 if (tem)
5229 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5231 else if (GET_CODE (op1) == NEG)
5233 tem = simplify_unary_operation (NEG, mode, op0, mode);
5234 if (tem)
5235 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5238 /* Canonicalize the two multiplication operands. */
5239 /* a * -b + c => -b * a + c. */
5240 if (swap_commutative_operands_p (op0, op1))
5241 tem = op0, op0 = op1, op1 = tem, any_change = true;
5243 if (any_change)
5244 return gen_rtx_FMA (mode, op0, op1, op2);
5245 return NULL_RTX;
5247 case SIGN_EXTRACT:
5248 case ZERO_EXTRACT:
5249 if (CONST_INT_P (op0)
5250 && CONST_INT_P (op1)
5251 && CONST_INT_P (op2)
5252 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5253 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5255 /* Extracting a bit-field from a constant */
5256 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5257 HOST_WIDE_INT op1val = INTVAL (op1);
5258 HOST_WIDE_INT op2val = INTVAL (op2);
5259 if (BITS_BIG_ENDIAN)
5260 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5261 else
5262 val >>= op2val;
5264 if (HOST_BITS_PER_WIDE_INT != op1val)
5266 /* First zero-extend. */
5267 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5268 /* If desired, propagate sign bit. */
5269 if (code == SIGN_EXTRACT
5270 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5271 != 0)
5272 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5275 return gen_int_mode (val, mode);
5277 break;
5279 case IF_THEN_ELSE:
5280 if (CONST_INT_P (op0))
5281 return op0 != const0_rtx ? op1 : op2;
5283 /* Convert c ? a : a into "a". */
5284 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5285 return op1;
5287 /* Convert a != b ? a : b into "a". */
5288 if (GET_CODE (op0) == NE
5289 && ! side_effects_p (op0)
5290 && ! HONOR_NANS (mode)
5291 && ! HONOR_SIGNED_ZEROS (mode)
5292 && ((rtx_equal_p (XEXP (op0, 0), op1)
5293 && rtx_equal_p (XEXP (op0, 1), op2))
5294 || (rtx_equal_p (XEXP (op0, 0), op2)
5295 && rtx_equal_p (XEXP (op0, 1), op1))))
5296 return op1;
5298 /* Convert a == b ? a : b into "b". */
5299 if (GET_CODE (op0) == EQ
5300 && ! side_effects_p (op0)
5301 && ! HONOR_NANS (mode)
5302 && ! HONOR_SIGNED_ZEROS (mode)
5303 && ((rtx_equal_p (XEXP (op0, 0), op1)
5304 && rtx_equal_p (XEXP (op0, 1), op2))
5305 || (rtx_equal_p (XEXP (op0, 0), op2)
5306 && rtx_equal_p (XEXP (op0, 1), op1))))
5307 return op2;
5309 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5311 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5312 ? GET_MODE (XEXP (op0, 1))
5313 : GET_MODE (XEXP (op0, 0)));
5314 rtx temp;
5316 /* Look for happy constants in op1 and op2. */
5317 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5319 HOST_WIDE_INT t = INTVAL (op1);
5320 HOST_WIDE_INT f = INTVAL (op2);
5322 if (t == STORE_FLAG_VALUE && f == 0)
5323 code = GET_CODE (op0);
5324 else if (t == 0 && f == STORE_FLAG_VALUE)
5326 enum rtx_code tmp;
5327 tmp = reversed_comparison_code (op0, NULL_RTX);
5328 if (tmp == UNKNOWN)
5329 break;
5330 code = tmp;
5332 else
5333 break;
5335 return simplify_gen_relational (code, mode, cmp_mode,
5336 XEXP (op0, 0), XEXP (op0, 1));
5339 if (cmp_mode == VOIDmode)
5340 cmp_mode = op0_mode;
5341 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5342 cmp_mode, XEXP (op0, 0),
5343 XEXP (op0, 1));
5345 /* See if any simplifications were possible. */
5346 if (temp)
5348 if (CONST_INT_P (temp))
5349 return temp == const0_rtx ? op2 : op1;
5350 else if (temp)
5351 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5354 break;
5356 case VEC_MERGE:
5357 gcc_assert (GET_MODE (op0) == mode);
5358 gcc_assert (GET_MODE (op1) == mode);
5359 gcc_assert (VECTOR_MODE_P (mode));
5360 op2 = avoid_constant_pool_reference (op2);
5361 if (CONST_INT_P (op2))
5363 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5364 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5365 int mask = (1 << n_elts) - 1;
5367 if (!(INTVAL (op2) & mask))
5368 return op1;
5369 if ((INTVAL (op2) & mask) == mask)
5370 return op0;
5372 op0 = avoid_constant_pool_reference (op0);
5373 op1 = avoid_constant_pool_reference (op1);
5374 if (GET_CODE (op0) == CONST_VECTOR
5375 && GET_CODE (op1) == CONST_VECTOR)
5377 rtvec v = rtvec_alloc (n_elts);
5378 unsigned int i;
5380 for (i = 0; i < n_elts; i++)
5381 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5382 ? CONST_VECTOR_ELT (op0, i)
5383 : CONST_VECTOR_ELT (op1, i));
5384 return gen_rtx_CONST_VECTOR (mode, v);
5387 break;
5389 default:
5390 gcc_unreachable ();
5393 return 0;
5396 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5397 or CONST_VECTOR,
5398 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5400 Works by unpacking OP into a collection of 8-bit values
5401 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5402 and then repacking them again for OUTERMODE. */
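/* For instance, on a little-endian target, simplifying
   (subreg:QI (const_int 0x1234) 0) with HImode as the inner mode unpacks
   the constant into the byte array { 0x34, 0x12 }, selects byte 0, and
   repacks it as (const_int 0x34).  */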
5404 static rtx
5405 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5406 enum machine_mode innermode, unsigned int byte)
5408 /* We support up to 512-bit values (for V8DFmode). */
5409 enum {
5410 max_bitsize = 512,
5411 value_bit = 8,
5412 value_mask = (1 << value_bit) - 1
5414 unsigned char value[max_bitsize / value_bit];
5415 int value_start;
5416 int i;
5417 int elem;
5419 int num_elem;
5420 rtx * elems;
5421 int elem_bitsize;
5422 rtx result_s;
5423 rtvec result_v = NULL;
5424 enum mode_class outer_class;
5425 enum machine_mode outer_submode;
5427 /* Some ports misuse CCmode. */
5428 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5429 return op;
5431 /* We have no way to represent a complex constant at the rtl level. */
5432 if (COMPLEX_MODE_P (outermode))
5433 return NULL_RTX;
5435 /* Unpack the value. */
5437 if (GET_CODE (op) == CONST_VECTOR)
5439 num_elem = CONST_VECTOR_NUNITS (op);
5440 elems = &CONST_VECTOR_ELT (op, 0);
5441 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5443 else
5445 num_elem = 1;
5446 elems = &op;
5447 elem_bitsize = max_bitsize;
5449 /* If this asserts, it is too complicated; reducing value_bit may help. */
5450 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5451 /* I don't know how to handle endianness of sub-units. */
5452 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5454 for (elem = 0; elem < num_elem; elem++)
5456 unsigned char * vp;
5457 rtx el = elems[elem];
5459 /* Vectors are kept in target memory order. (This is probably
5460 a mistake.) */
5462 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5463 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5464 / BITS_PER_UNIT);
5465 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5466 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5467 unsigned bytele = (subword_byte % UNITS_PER_WORD
5468 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5469 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5472 switch (GET_CODE (el))
5474 case CONST_INT:
5475 for (i = 0;
5476 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5477 i += value_bit)
5478 *vp++ = INTVAL (el) >> i;
5479 /* CONST_INTs are always logically sign-extended. */
5480 for (; i < elem_bitsize; i += value_bit)
5481 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5482 break;
5484 case CONST_DOUBLE:
5485 if (GET_MODE (el) == VOIDmode)
5487 unsigned char extend = 0;
5488 /* If this triggers, someone should have generated a
5489 CONST_INT instead. */
5490 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5492 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5493 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5494 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5496 *vp++
5497 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5498 i += value_bit;
5501 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5502 extend = -1;
5503 for (; i < elem_bitsize; i += value_bit)
5504 *vp++ = extend;
5506 else
5508 long tmp[max_bitsize / 32];
5509 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5511 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5512 gcc_assert (bitsize <= elem_bitsize);
5513 gcc_assert (bitsize % value_bit == 0);
5515 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5516 GET_MODE (el));
5518 /* real_to_target produces its result in words affected by
5519 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5520 and use WORDS_BIG_ENDIAN instead; see the documentation
5521 of SUBREG in rtl.texi. */
5522 for (i = 0; i < bitsize; i += value_bit)
5524 int ibase;
5525 if (WORDS_BIG_ENDIAN)
5526 ibase = bitsize - 1 - i;
5527 else
5528 ibase = i;
5529 *vp++ = tmp[ibase / 32] >> i % 32;
5532 /* It shouldn't matter what's done here, so fill it with
5533 zero. */
5534 for (; i < elem_bitsize; i += value_bit)
5535 *vp++ = 0;
5537 break;
5539 case CONST_FIXED:
5540 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5542 for (i = 0; i < elem_bitsize; i += value_bit)
5543 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5545 else
5547 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5548 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5549 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5550 i += value_bit)
5551 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5552 >> (i - HOST_BITS_PER_WIDE_INT);
5553 for (; i < elem_bitsize; i += value_bit)
5554 *vp++ = 0;
5556 break;
5558 default:
5559 gcc_unreachable ();
5563 /* Now, pick the right byte to start with. */
5564 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5565 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5566 will already have offset 0. */
5567 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5569 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5570 - byte);
5571 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5572 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5573 byte = (subword_byte % UNITS_PER_WORD
5574 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5577 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5578 so if it's become negative it will instead be very large.) */
5579 gcc_assert (byte < GET_MODE_SIZE (innermode));
5581 /* Convert from bytes to chunks of size value_bit. */
5582 value_start = byte * (BITS_PER_UNIT / value_bit);
5584 /* Re-pack the value. */
5586 if (VECTOR_MODE_P (outermode))
5588 num_elem = GET_MODE_NUNITS (outermode);
5589 result_v = rtvec_alloc (num_elem);
5590 elems = &RTVEC_ELT (result_v, 0);
5591 outer_submode = GET_MODE_INNER (outermode);
5593 else
5595 num_elem = 1;
5596 elems = &result_s;
5597 outer_submode = outermode;
5600 outer_class = GET_MODE_CLASS (outer_submode);
5601 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5603 gcc_assert (elem_bitsize % value_bit == 0);
5604 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5606 for (elem = 0; elem < num_elem; elem++)
5608 unsigned char *vp;
5610 /* Vectors are stored in target memory order. (This is probably
5611 a mistake.) */
5613 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5614 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5615 / BITS_PER_UNIT);
5616 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5617 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5618 unsigned bytele = (subword_byte % UNITS_PER_WORD
5619 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5620 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5623 switch (outer_class)
5625 case MODE_INT:
5626 case MODE_PARTIAL_INT:
5628 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5630 for (i = 0;
5631 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5632 i += value_bit)
5633 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5634 for (; i < elem_bitsize; i += value_bit)
5635 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5636 << (i - HOST_BITS_PER_WIDE_INT);
5638 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5639 know why. */
5640 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5641 elems[elem] = gen_int_mode (lo, outer_submode);
5642 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5643 elems[elem] = immed_double_const (lo, hi, outer_submode);
5644 else
5645 return NULL_RTX;
5647 break;
5649 case MODE_FLOAT:
5650 case MODE_DECIMAL_FLOAT:
5652 REAL_VALUE_TYPE r;
5653 long tmp[max_bitsize / 32];
5655 /* real_from_target wants its input in words affected by
5656 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5657 and use WORDS_BIG_ENDIAN instead; see the documentation
5658 of SUBREG in rtl.texi. */
5659 for (i = 0; i < max_bitsize / 32; i++)
5660 tmp[i] = 0;
5661 for (i = 0; i < elem_bitsize; i += value_bit)
5663 int ibase;
5664 if (WORDS_BIG_ENDIAN)
5665 ibase = elem_bitsize - 1 - i;
5666 else
5667 ibase = i;
5668 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5671 real_from_target (&r, tmp, outer_submode);
5672 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5674 break;
5676 case MODE_FRACT:
5677 case MODE_UFRACT:
5678 case MODE_ACCUM:
5679 case MODE_UACCUM:
5681 FIXED_VALUE_TYPE f;
5682 f.data.low = 0;
5683 f.data.high = 0;
5684 f.mode = outer_submode;
5686 for (i = 0;
5687 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5688 i += value_bit)
5689 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5690 for (; i < elem_bitsize; i += value_bit)
5691 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5692 << (i - HOST_BITS_PER_WIDE_INT));
5694 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5696 break;
5698 default:
5699 gcc_unreachable ();
5702 if (VECTOR_MODE_P (outermode))
5703 return gen_rtx_CONST_VECTOR (outermode, result_v);
5704 else
5705 return result_s;
5708 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5709 Return 0 if no simplifications are possible. */
5711 simplify_subreg (enum machine_mode outermode, rtx op,
5712 enum machine_mode innermode, unsigned int byte)
5714 /* Little bit of sanity checking. */
5715 gcc_assert (innermode != VOIDmode);
5716 gcc_assert (outermode != VOIDmode);
5717 gcc_assert (innermode != BLKmode);
5718 gcc_assert (outermode != BLKmode);
5720 gcc_assert (GET_MODE (op) == innermode
5721 || GET_MODE (op) == VOIDmode);
5723 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5724 return NULL_RTX;
5726 if (byte >= GET_MODE_SIZE (innermode))
5727 return NULL_RTX;
5729 if (outermode == innermode && !byte)
5730 return op;
5732 if (CONST_SCALAR_INT_P (op)
5733 || CONST_DOUBLE_AS_FLOAT_P (op)
5734 || GET_CODE (op) == CONST_FIXED
5735 || GET_CODE (op) == CONST_VECTOR)
5736 return simplify_immed_subreg (outermode, op, innermode, byte);
5738 /* Changing mode twice with SUBREG => just change it once,
5739 or not at all if changing back to op's starting mode. */
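  /* For instance, (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to the
     single (subreg:QI (reg:SI r) 0).  */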
5740 if (GET_CODE (op) == SUBREG)
5742 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5743 int final_offset = byte + SUBREG_BYTE (op);
5744 rtx newx;
5746 if (outermode == innermostmode
5747 && byte == 0 && SUBREG_BYTE (op) == 0)
5748 return SUBREG_REG (op);
5750 /* The SUBREG_BYTE represents the offset, as if the value were stored
5751 in memory. The irritating exception is a paradoxical subreg, where
5752 we define SUBREG_BYTE to be 0; on big-endian machines this value
5753 would really be negative. For a moment, undo this exception. */
5754 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5756 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5757 if (WORDS_BIG_ENDIAN)
5758 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5759 if (BYTES_BIG_ENDIAN)
5760 final_offset += difference % UNITS_PER_WORD;
5762 if (SUBREG_BYTE (op) == 0
5763 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5765 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5766 if (WORDS_BIG_ENDIAN)
5767 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5768 if (BYTES_BIG_ENDIAN)
5769 final_offset += difference % UNITS_PER_WORD;
5772 /* See whether resulting subreg will be paradoxical. */
5773 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5775 /* In nonparadoxical subregs we can't handle negative offsets. */
5776 if (final_offset < 0)
5777 return NULL_RTX;
5778 /* Bail out in case resulting subreg would be incorrect. */
5779 if (final_offset % GET_MODE_SIZE (outermode)
5780 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5781 return NULL_RTX;
5783 else
5785 int offset = 0;
5786 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5788 /* In a paradoxical subreg, see if we are still looking at the lower
5789 part. If so, our SUBREG_BYTE will be 0. */
5790 if (WORDS_BIG_ENDIAN)
5791 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5792 if (BYTES_BIG_ENDIAN)
5793 offset += difference % UNITS_PER_WORD;
5794 if (offset == final_offset)
5795 final_offset = 0;
5796 else
5797 return NULL_RTX;
5800 /* Recurse for further possible simplifications. */
5801 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5802 final_offset);
5803 if (newx)
5804 return newx;
5805 if (validate_subreg (outermode, innermostmode,
5806 SUBREG_REG (op), final_offset))
5808 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5809 if (SUBREG_PROMOTED_VAR_P (op)
5810 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5811 && GET_MODE_CLASS (outermode) == MODE_INT
5812 && IN_RANGE (GET_MODE_SIZE (outermode),
5813 GET_MODE_SIZE (innermode),
5814 GET_MODE_SIZE (innermostmode))
5815 && subreg_lowpart_p (newx))
5817 SUBREG_PROMOTED_VAR_P (newx) = 1;
5818 SUBREG_PROMOTED_UNSIGNED_SET
5819 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5821 return newx;
5823 return NULL_RTX;
5826 /* SUBREG of a hard register => just change the register number
5827 and/or mode. If the hard register is not valid in that mode,
5828 suppress this simplification. If the hard register is the stack,
5829 frame, or argument pointer, leave this as a SUBREG. */
5831 if (REG_P (op) && HARD_REGISTER_P (op))
5833 unsigned int regno, final_regno;
5835 regno = REGNO (op);
5836 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5837 if (HARD_REGISTER_NUM_P (final_regno))
5839 rtx x;
5840 int final_offset = byte;
5842 /* Adjust offset for paradoxical subregs. */
5843 if (byte == 0
5844 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5846 int difference = (GET_MODE_SIZE (innermode)
5847 - GET_MODE_SIZE (outermode));
5848 if (WORDS_BIG_ENDIAN)
5849 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5850 if (BYTES_BIG_ENDIAN)
5851 final_offset += difference % UNITS_PER_WORD;
5854 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5856 /* Propagate the original regno. We don't have any way to specify
5857 an offset inside the original regno, so do so only for the lowpart.
5858 The information is used only by alias analysis, which cannot
5859 grok partial registers anyway. */
5861 if (subreg_lowpart_offset (outermode, innermode) == byte)
5862 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5863 return x;
5867 /* If we have a SUBREG of a register that we are replacing and we are
5868 replacing it with a MEM, make a new MEM and try replacing the
5869 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5870 or if we would be widening it. */
5872 if (MEM_P (op)
5873 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5874 /* Allow splitting of volatile memory references in case we don't
5875 have an instruction to move the whole thing. */
5876 && (! MEM_VOLATILE_P (op)
5877 || ! have_insn_for (SET, innermode))
5878 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5879 return adjust_address_nv (op, outermode, byte);
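/* For instance (illustrative, assuming the address is not mode-dependent and
we are not widening): (subreg:SI (mem:DI (reg:SI base)) 4) is rewritten by
adjust_address_nv as roughly (mem:SI (plus:SI (reg:SI base) (const_int 4))),
with the memory attributes updated for the new mode and offset. */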
5881 /* Handle complex values represented as CONCAT
5882 of the real and imaginary parts. */
5883 if (GET_CODE (op) == CONCAT)
5885 unsigned int part_size, final_offset;
5886 rtx part, res;
5888 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5889 if (byte < part_size)
5891 part = XEXP (op, 0);
5892 final_offset = byte;
5894 else
5896 part = XEXP (op, 1);
5897 final_offset = byte - part_size;
5900 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5901 return NULL_RTX;
5903 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5904 if (res)
5905 return res;
5906 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5907 return gen_rtx_SUBREG (outermode, part, final_offset);
5908 return NULL_RTX;
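/* For instance (illustrative, assuming 4-byte SFmode):
(subreg:SF (concat:SC (reg:SF re) (reg:SF im)) 4) selects the second operand
and simplifies to (reg:SF im). */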
5911 /* A SUBREG resulting from a zero extension may fold to zero if
5912 it extracts higher bits than the ZERO_EXTEND's source provides. */
5913 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5915 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5916 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5917 return CONST0_RTX (outermode);
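/* For instance (illustrative, on a little-endian target):
(subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only bits that the zero
extension is known to clear, so it folds to (const_int 0). */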
5920 if (SCALAR_INT_MODE_P (outermode)
5921 && SCALAR_INT_MODE_P (innermode)
5922 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5923 && byte == subreg_lowpart_offset (outermode, innermode))
5925 rtx tem = simplify_truncation (outermode, op, innermode);
5926 if (tem)
5927 return tem;
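/* For instance (illustrative, little-endian lowpart):
(subreg:SI (sign_extend:DI (reg:SI x)) 0) is handled as a truncation of the
extension and reduces to (reg:SI x). */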
5930 return NULL_RTX;
5933 /* Make a SUBREG operation or equivalent if it folds. */
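/* For instance (an illustrative use, assuming a little-endian target): a
caller wanting the high word of a DImode value X might use
simplify_gen_subreg (SImode, X, DImode, 4); the result is a simplified rtx
when one is found, a fresh (subreg:SI X 4) when the subreg is merely valid,
or NULL_RTX when it cannot be represented. */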
5935 rtx
5936 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5937 enum machine_mode innermode, unsigned int byte)
5939 rtx newx;
5941 newx = simplify_subreg (outermode, op, innermode, byte);
5942 if (newx)
5943 return newx;
5945 if (GET_CODE (op) == SUBREG
5946 || GET_CODE (op) == CONCAT
5947 || GET_MODE (op) == VOIDmode)
5948 return NULL_RTX;
5950 if (validate_subreg (outermode, innermode, op, byte))
5951 return gen_rtx_SUBREG (outermode, op, byte);
5953 return NULL_RTX;
5956 /* Simplify X, an rtx expression.
5958 Return the simplified expression or NULL if no simplifications
5959 were possible.
5961 This is the preferred entry point into the simplification routines;
5962 however, we still allow passes to call the more specific routines.
5964 Right now GCC has three (yes, three) major bodies of RTL simplification
5965 code that need to be unified.
5967 1. fold_rtx in cse.c. This code uses various CSE specific
5968 information to aid in RTL simplification.
5970 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5971 it uses combine specific information to aid in RTL
5972 simplification.
5974 3. The routines in this file.
5977 Long term we want to only have one body of simplification code; to
5978 get to that state I recommend the following steps:
5980 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5981 which do not depend on pass-specific state into these routines.
5983 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5984 use this routine whenever possible.
5986 3. Allow for pass dependent state to be provided to these
5987 routines and add simplifications based on the pass dependent
5988 state. Remove code from cse.c & combine.c that becomes
5989 redundant/dead.
5991 It will take time, but ultimately the compiler will be easier to
5992 maintain and improve. It's totally silly that when we add a
5993 simplification it needs to be added to 4 places (3 for RTL
5994 simplification and 1 for tree simplification). */
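/* For instance (illustrative): given (plus:SI (reg:SI r) (const_int 0)),
simplify_rtx dispatches to simplify_binary_operation and returns (reg:SI r);
given (plus:SI (const_int 1) (reg:SI r)) it first re-canonicalizes the
commutative operands so that the constant comes last. */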
5996 rtx
5997 simplify_rtx (const_rtx x)
5999 const enum rtx_code code = GET_CODE (x);
6000 const enum machine_mode mode = GET_MODE (x);
6002 switch (GET_RTX_CLASS (code))
6004 case RTX_UNARY:
6005 return simplify_unary_operation (code, mode,
6006 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6007 case RTX_COMM_ARITH:
6008 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6009 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6011 /* Fall through.... */
6013 case RTX_BIN_ARITH:
6014 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6016 case RTX_TERNARY:
6017 case RTX_BITFIELD_OPS:
6018 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6019 XEXP (x, 0), XEXP (x, 1),
6020 XEXP (x, 2));
6022 case RTX_COMPARE:
6023 case RTX_COMM_COMPARE:
6024 return simplify_relational_operation (code, mode,
6025 ((GET_MODE (XEXP (x, 0))
6026 != VOIDmode)
6027 ? GET_MODE (XEXP (x, 0))
6028 : GET_MODE (XEXP (x, 1))),
6029 XEXP (x, 0),
6030 XEXP (x, 1));
6032 case RTX_EXTRA:
6033 if (code == SUBREG)
6034 return simplify_subreg (mode, SUBREG_REG (x),
6035 GET_MODE (SUBREG_REG (x)),
6036 SUBREG_BYTE (x));
6037 break;
6039 case RTX_OBJ:
6040 if (code == LO_SUM)
6042 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6043 if (GET_CODE (XEXP (x, 0)) == HIGH
6044 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6045 return XEXP (x, 1);
6047 break;
6049 default:
6050 break;
6052 return NULL;