gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
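/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND applied to
   0xffffffffffffffff yields (HOST_WIDE_INT) -1 and applied to 5 yields 0,
   i.e. the high half that treating LOW as signed would imply.  */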
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx. */
59 static rtx
60 neg_const_int (machine_mode mode, const_rtx i)
62 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
65 && val == UINTVAL (i))
66 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
67 mode);
68 return gen_int_mode (val, mode);
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
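/* For example, in SImode (precision 32) this holds only for the constant
   whose masked value is (unsigned HOST_WIDE_INT) 1 << 31, i.e. 0x80000000.  */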
74 bool
75 mode_signbit_p (machine_mode mode, const_rtx x)
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
83 width = GET_MODE_PRECISION (mode);
84 if (width == 0)
85 return false;
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && CONST_INT_P (x))
89 val = INTVAL (x);
90 #if TARGET_SUPPORTS_WIDE_INT
91 else if (CONST_WIDE_INT_P (x))
93 unsigned int i;
94 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
95 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
96 return false;
97 for (i = 0; i < elts - 1; i++)
98 if (CONST_WIDE_INT_ELT (x, i) != 0)
99 return false;
100 val = CONST_WIDE_INT_ELT (x, elts - 1);
101 width %= HOST_BITS_PER_WIDE_INT;
102 if (width == 0)
103 width = HOST_BITS_PER_WIDE_INT;
105 #else
106 else if (width <= HOST_BITS_PER_DOUBLE_INT
107 && CONST_DOUBLE_AS_INT_P (x)
108 && CONST_DOUBLE_LOW (x) == 0)
110 val = CONST_DOUBLE_HIGH (x);
111 width -= HOST_BITS_PER_WIDE_INT;
113 #endif
114 else
115 /* X is not an integer constant. */
116 return false;
118 if (width < HOST_BITS_PER_WIDE_INT)
119 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
120 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
123 /* Test whether VAL is equal to the most significant bit of mode MODE
124 (after masking with the mode mask of MODE). Returns false if the
125 precision of MODE is too large to handle. */
127 bool
128 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= GET_MODE_MASK (mode);
140 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
143 /* Test whether the most significant bit of mode MODE is set in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val != 0;
161 /* Test whether the most significant bit of mode MODE is clear in VAL.
162 Returns false if the precision of MODE is too large to handle. */
163 bool
164 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
166 unsigned int width;
168 if (GET_MODE_CLASS (mode) != MODE_INT)
169 return false;
171 width = GET_MODE_PRECISION (mode);
172 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
173 return false;
175 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
176 return val == 0;
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
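/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) folds to X,
   and simplify_gen_binary (PLUS, SImode, const1_rtx, const1_rtx) folds to
   (const_int 2); when nothing folds, the rtx is built with any constant
   operand placed second.  */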
183 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
184 rtx op1)
186 rtx tem;
188 /* If this simplifies, do it. */
189 tem = simplify_binary_operation (code, mode, op0, op1);
190 if (tem)
191 return tem;
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0, op1))
196 std::swap (op0, op1);
198 return gen_rtx_fmt_ee (code, mode, op0, op1);
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
204 avoid_constant_pool_reference (rtx x)
206 rtx c, tmp, addr;
207 machine_mode cmode;
208 HOST_WIDE_INT offset = 0;
210 switch (GET_CODE (x))
212 case MEM:
213 break;
215 case FLOAT_EXTEND:
216 /* Handle float extensions of constant pool references. */
217 tmp = XEXP (x, 0);
218 c = avoid_constant_pool_reference (tmp);
219 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
220 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
221 GET_MODE (x));
222 return x;
224 default:
225 return x;
228 if (GET_MODE (x) == BLKmode)
229 return x;
231 addr = XEXP (x, 0);
233 /* Call target hook to avoid the effects of -fpic etc.... */
234 addr = targetm.delegitimize_address (addr);
236 /* Split the address into a base and integer offset. */
237 if (GET_CODE (addr) == CONST
238 && GET_CODE (XEXP (addr, 0)) == PLUS
239 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
241 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
242 addr = XEXP (XEXP (addr, 0), 0);
245 if (GET_CODE (addr) == LO_SUM)
246 addr = XEXP (addr, 1);
248 /* If this is a constant pool reference, we can turn it into its
249 constant and hope that simplifications happen. */
250 if (GET_CODE (addr) == SYMBOL_REF
251 && CONSTANT_POOL_ADDRESS_P (addr))
253 c = get_pool_constant (addr);
254 cmode = get_pool_mode (addr);
256 /* If we're accessing the constant in a different mode than it was
257 originally stored, attempt to fix that up via subreg simplifications.
258 If that fails we have no choice but to return the original memory. */
259 if (offset == 0 && cmode == GET_MODE (x))
260 return c;
261 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
263 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
264 if (tem && CONSTANT_P (tem))
265 return tem;
269 return x;
272 /* Simplify a MEM based on its attributes. This is the default
273 delegitimize_address target hook, and it's recommended that every
274 overrider call it. */
277 delegitimize_mem_from_attrs (rtx x)
279 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
280 use their base addresses as equivalent. */
281 if (MEM_P (x)
282 && MEM_EXPR (x)
283 && MEM_OFFSET_KNOWN_P (x))
285 tree decl = MEM_EXPR (x);
286 machine_mode mode = GET_MODE (x);
287 HOST_WIDE_INT offset = 0;
289 switch (TREE_CODE (decl))
291 default:
292 decl = NULL;
293 break;
295 case VAR_DECL:
296 break;
298 case ARRAY_REF:
299 case ARRAY_RANGE_REF:
300 case COMPONENT_REF:
301 case BIT_FIELD_REF:
302 case REALPART_EXPR:
303 case IMAGPART_EXPR:
304 case VIEW_CONVERT_EXPR:
306 HOST_WIDE_INT bitsize, bitpos;
307 tree toffset;
308 int unsignedp, reversep, volatilep = 0;
310 decl
311 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
312 &unsignedp, &reversep, &volatilep, false);
313 if (bitsize != GET_MODE_BITSIZE (mode)
314 || (bitpos % BITS_PER_UNIT)
315 || (toffset && !tree_fits_shwi_p (toffset)))
316 decl = NULL;
317 else
319 offset += bitpos / BITS_PER_UNIT;
320 if (toffset)
321 offset += tree_to_shwi (toffset);
323 break;
327 if (decl
328 && mode == GET_MODE (x)
329 && TREE_CODE (decl) == VAR_DECL
330 && (TREE_STATIC (decl)
331 || DECL_THREAD_LOCAL_P (decl))
332 && DECL_RTL_SET_P (decl)
333 && MEM_P (DECL_RTL (decl)))
335 rtx newx;
337 offset += MEM_OFFSET (x);
339 newx = DECL_RTL (decl);
341 if (MEM_P (newx))
343 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 /* Avoid creating a new MEM needlessly if we already had
346 the same address. We do if there's no OFFSET and the
347 old address X is identical to NEWX, or if X is of the
348 form (plus NEWX OFFSET), or the NEWX is of the form
349 (plus Y (const_int Z)) and X is that with the offset
350 added: (plus Y (const_int Z+OFFSET)). */
351 if (!((offset == 0
352 || (GET_CODE (o) == PLUS
353 && GET_CODE (XEXP (o, 1)) == CONST_INT
354 && (offset == INTVAL (XEXP (o, 1))
355 || (GET_CODE (n) == PLUS
356 && GET_CODE (XEXP (n, 1)) == CONST_INT
357 && (INTVAL (XEXP (n, 1)) + offset
358 == INTVAL (XEXP (o, 1)))
359 && (n = XEXP (n, 0))))
360 && (o = XEXP (o, 0))))
361 && rtx_equal_p (o, n)))
362 x = adjust_address_nv (newx, mode, offset);
364 else if (GET_MODE (x) == GET_MODE (newx)
365 && offset == 0)
366 x = newx;
370 return x;
373 /* Make a unary operation by first seeing if it folds and otherwise making
374 the specified operation. */
377 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
378 machine_mode op_mode)
380 rtx tem;
382 /* If this simplifies, use it. */
383 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
384 return tem;
386 return gen_rtx_fmt_e (code, mode, op);
389 /* Likewise for ternary operations. */
392 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
393 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
395 rtx tem;
397 /* If this simplifies, use it. */
398 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
399 op0, op1, op2)))
400 return tem;
402 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
405 /* Likewise, for relational operations.
406 CMP_MODE specifies mode comparison is done in. */
409 simplify_gen_relational (enum rtx_code code, machine_mode mode,
410 machine_mode cmp_mode, rtx op0, rtx op1)
412 rtx tem;
414 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
415 op0, op1)))
416 return tem;
418 return gen_rtx_fmt_ee (code, mode, op0, op1);
421 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
422 and simplify the result. If FN is non-NULL, call this callback on each
 423 X; if it returns non-NULL, replace X with its return value and simplify the
424 result. */
427 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
428 rtx (*fn) (rtx, const_rtx, void *), void *data)
430 enum rtx_code code = GET_CODE (x);
431 machine_mode mode = GET_MODE (x);
432 machine_mode op_mode;
433 const char *fmt;
434 rtx op0, op1, op2, newx, op;
435 rtvec vec, newvec;
436 int i, j;
438 if (__builtin_expect (fn != NULL, 0))
440 newx = fn (x, old_rtx, data);
441 if (newx)
442 return newx;
444 else if (rtx_equal_p (x, old_rtx))
445 return copy_rtx ((rtx) data);
447 switch (GET_RTX_CLASS (code))
449 case RTX_UNARY:
450 op0 = XEXP (x, 0);
451 op_mode = GET_MODE (op0);
452 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
453 if (op0 == XEXP (x, 0))
454 return x;
455 return simplify_gen_unary (code, mode, op0, op_mode);
457 case RTX_BIN_ARITH:
458 case RTX_COMM_ARITH:
459 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
460 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
461 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
462 return x;
463 return simplify_gen_binary (code, mode, op0, op1);
465 case RTX_COMPARE:
466 case RTX_COMM_COMPARE:
467 op0 = XEXP (x, 0);
468 op1 = XEXP (x, 1);
469 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
470 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
473 return x;
474 return simplify_gen_relational (code, mode, op_mode, op0, op1);
476 case RTX_TERNARY:
477 case RTX_BITFIELD_OPS:
478 op0 = XEXP (x, 0);
479 op_mode = GET_MODE (op0);
480 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
481 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
482 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
483 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
484 return x;
485 if (op_mode == VOIDmode)
486 op_mode = GET_MODE (op0);
487 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
489 case RTX_EXTRA:
490 if (code == SUBREG)
492 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
493 if (op0 == SUBREG_REG (x))
494 return x;
495 op0 = simplify_gen_subreg (GET_MODE (x), op0,
496 GET_MODE (SUBREG_REG (x)),
497 SUBREG_BYTE (x));
498 return op0 ? op0 : x;
500 break;
502 case RTX_OBJ:
503 if (code == MEM)
505 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
506 if (op0 == XEXP (x, 0))
507 return x;
508 return replace_equiv_address_nv (x, op0);
510 else if (code == LO_SUM)
512 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
513 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
515 /* (lo_sum (high x) y) -> y where x and y have the same base. */
516 if (GET_CODE (op0) == HIGH)
518 rtx base0, base1, offset0, offset1;
519 split_const (XEXP (op0, 0), &base0, &offset0);
520 split_const (op1, &base1, &offset1);
521 if (rtx_equal_p (base0, base1))
522 return op1;
525 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
526 return x;
527 return gen_rtx_LO_SUM (mode, op0, op1);
529 break;
531 default:
532 break;
535 newx = x;
536 fmt = GET_RTX_FORMAT (code);
537 for (i = 0; fmt[i]; i++)
538 switch (fmt[i])
540 case 'E':
541 vec = XVEC (x, i);
542 newvec = XVEC (newx, i);
543 for (j = 0; j < GET_NUM_ELEM (vec); j++)
545 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
546 old_rtx, fn, data);
547 if (op != RTVEC_ELT (vec, j))
549 if (newvec == vec)
551 newvec = shallow_copy_rtvec (vec);
552 if (x == newx)
553 newx = shallow_copy_rtx (x);
554 XVEC (newx, i) = newvec;
556 RTVEC_ELT (newvec, j) = op;
559 break;
561 case 'e':
562 if (XEXP (x, i))
564 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
565 if (op != XEXP (x, i))
567 if (x == newx)
568 newx = shallow_copy_rtx (x);
569 XEXP (newx, i) = op;
572 break;
574 return newx;
577 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
578 resulting RTX. Return a new RTX which is as simplified as possible. */
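/* For example, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (reg:SI 200)) yields just (reg:SI 200), because the
   rebuilt PLUS is fed back through the simplifiers.  */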
581 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
583 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
586 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
587 Only handle cases where the truncated value is inherently an rvalue.
589 RTL provides two ways of truncating a value:
591 1. a lowpart subreg. This form is only a truncation when both
592 the outer and inner modes (here MODE and OP_MODE respectively)
593 are scalar integers, and only then when the subreg is used as
594 an rvalue.
596 It is only valid to form such truncating subregs if the
597 truncation requires no action by the target. The onus for
598 proving this is on the creator of the subreg -- e.g. the
599 caller to simplify_subreg or simplify_gen_subreg -- and typically
600 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
602 2. a TRUNCATE. This form handles both scalar and compound integers.
604 The first form is preferred where valid. However, the TRUNCATE
605 handling in simplify_unary_operation turns the second form into the
606 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
607 so it is generally safe to form rvalue truncations using:
609 simplify_gen_unary (TRUNCATE, ...)
611 and leave simplify_unary_operation to work out which representation
612 should be used.
614 Because of the proof requirements on (1), simplify_truncation must
615 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
616 regardless of whether the outer truncation came from a SUBREG or a
617 TRUNCATE. For example, if the caller has proven that an SImode
618 truncation of:
620 (and:DI X Y)
622 is a no-op and can be represented as a subreg, it does not follow
623 that SImode truncations of X and Y are also no-ops. On a target
624 like 64-bit MIPS that requires SImode values to be stored in
625 sign-extended form, an SImode truncation of:
627 (and:DI (reg:DI X) (const_int 63))
629 is trivially a no-op because only the lower 6 bits can be set.
630 However, X is still an arbitrary 64-bit number and so we cannot
631 assume that truncating it too is a no-op. */
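/* As an illustration, on a target where SImode truncation of DImode needs no
   action, simplify_gen_unary (TRUNCATE, SImode, (reg:DI X), DImode) comes
   back as the lowpart subreg (subreg:SI (reg:DI X) 0) on little-endian,
   while on other targets it remains a (truncate:SI ...).  */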
633 static rtx
634 simplify_truncation (machine_mode mode, rtx op,
635 machine_mode op_mode)
637 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
638 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
639 gcc_assert (precision <= op_precision);
641 /* Optimize truncations of zero and sign extended values. */
642 if (GET_CODE (op) == ZERO_EXTEND
643 || GET_CODE (op) == SIGN_EXTEND)
645 /* There are three possibilities. If MODE is the same as the
646 origmode, we can omit both the extension and the subreg.
647 If MODE is not larger than the origmode, we can apply the
648 truncation without the extension. Finally, if the outermode
649 is larger than the origmode, we can just extend to the appropriate
650 mode. */
651 machine_mode origmode = GET_MODE (XEXP (op, 0));
652 if (mode == origmode)
653 return XEXP (op, 0);
654 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
655 return simplify_gen_unary (TRUNCATE, mode,
656 XEXP (op, 0), origmode);
657 else
658 return simplify_gen_unary (GET_CODE (op), mode,
659 XEXP (op, 0), origmode);
662 /* If the machine can perform operations in the truncated mode, distribute
663 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
664 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
665 if (1
666 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
667 && (GET_CODE (op) == PLUS
668 || GET_CODE (op) == MINUS
669 || GET_CODE (op) == MULT))
671 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
672 if (op0)
674 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
675 if (op1)
676 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
680 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 681 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
682 the outer subreg is effectively a truncation to the original mode. */
683 if ((GET_CODE (op) == LSHIFTRT
684 || GET_CODE (op) == ASHIFTRT)
685 /* Ensure that OP_MODE is at least twice as wide as MODE
686 to avoid the possibility that an outer LSHIFTRT shifts by more
687 than the sign extension's sign_bit_copies and introduces zeros
688 into the high bits of the result. */
689 && 2 * precision <= op_precision
690 && CONST_INT_P (XEXP (op, 1))
691 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
692 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
693 && UINTVAL (XEXP (op, 1)) < precision)
694 return simplify_gen_binary (ASHIFTRT, mode,
695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 698 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
699 the outer subreg is effectively a truncation to the original mode. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && CONST_INT_P (XEXP (op, 1))
703 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
704 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
705 && UINTVAL (XEXP (op, 1)) < precision)
706 return simplify_gen_binary (LSHIFTRT, mode,
707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 710 (ashift:QI (x:QI) C), where C is a suitable small constant and
711 the outer subreg is effectively a truncation to the original mode. */
712 if (GET_CODE (op) == ASHIFT
713 && CONST_INT_P (XEXP (op, 1))
714 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
717 && UINTVAL (XEXP (op, 1)) < precision)
718 return simplify_gen_binary (ASHIFT, mode,
719 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
722 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
723 and C2. */
724 if (GET_CODE (op) == AND
725 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
726 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
727 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
728 && CONST_INT_P (XEXP (op, 1)))
730 rtx op0 = (XEXP (XEXP (op, 0), 0));
731 rtx shift_op = XEXP (XEXP (op, 0), 1);
732 rtx mask_op = XEXP (op, 1);
733 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
734 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
736 if (shift < precision
737 /* If doing this transform works for an X with all bits set,
738 it works for any X. */
739 && ((GET_MODE_MASK (mode) >> shift) & mask)
740 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
741 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
742 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
744 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
745 return simplify_gen_binary (AND, mode, op0, mask_op);
749 /* Recognize a word extraction from a multi-word subreg. */
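/* For example, on a 32-bit little-endian target,
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) becomes
   (subreg:SI (reg:DI X) 4), a direct reference to the high word.  */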
750 if ((GET_CODE (op) == LSHIFTRT
751 || GET_CODE (op) == ASHIFTRT)
752 && SCALAR_INT_MODE_P (mode)
753 && SCALAR_INT_MODE_P (op_mode)
754 && precision >= BITS_PER_WORD
755 && 2 * precision <= op_precision
756 && CONST_INT_P (XEXP (op, 1))
757 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
758 && UINTVAL (XEXP (op, 1)) < op_precision)
760 int byte = subreg_lowpart_offset (mode, op_mode);
761 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
762 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
763 (WORDS_BIG_ENDIAN
764 ? byte - shifted_bytes
765 : byte + shifted_bytes));
768 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
769 and try replacing the TRUNCATE and shift with it. Don't do this
770 if the MEM has a mode-dependent address. */
771 if ((GET_CODE (op) == LSHIFTRT
772 || GET_CODE (op) == ASHIFTRT)
773 && SCALAR_INT_MODE_P (op_mode)
774 && MEM_P (XEXP (op, 0))
775 && CONST_INT_P (XEXP (op, 1))
776 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
777 && INTVAL (XEXP (op, 1)) > 0
778 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
779 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
780 MEM_ADDR_SPACE (XEXP (op, 0)))
781 && ! MEM_VOLATILE_P (XEXP (op, 0))
782 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
783 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
785 int byte = subreg_lowpart_offset (mode, op_mode);
786 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
787 return adjust_address_nv (XEXP (op, 0), mode,
788 (WORDS_BIG_ENDIAN
789 ? byte - shifted_bytes
790 : byte + shifted_bytes));
793 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
794 (OP:SI foo:SI) if OP is NEG or ABS. */
795 if ((GET_CODE (op) == ABS
796 || GET_CODE (op) == NEG)
797 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
798 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
799 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
800 return simplify_gen_unary (GET_CODE (op), mode,
801 XEXP (XEXP (op, 0), 0), mode);
803 /* (truncate:A (subreg:B (truncate:C X) 0)) is
804 (truncate:A X). */
805 if (GET_CODE (op) == SUBREG
806 && SCALAR_INT_MODE_P (mode)
807 && SCALAR_INT_MODE_P (op_mode)
808 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
809 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
810 && subreg_lowpart_p (op))
812 rtx inner = XEXP (SUBREG_REG (op), 0);
813 if (GET_MODE_PRECISION (mode)
814 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
815 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
816 else
817 /* If subreg above is paradoxical and C is narrower
818 than A, return (subreg:A (truncate:C X) 0). */
819 return simplify_gen_subreg (mode, SUBREG_REG (op),
820 GET_MODE (SUBREG_REG (op)), 0);
823 /* (truncate:A (truncate:B X)) is (truncate:A X). */
824 if (GET_CODE (op) == TRUNCATE)
825 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
826 GET_MODE (XEXP (op, 0)));
828 return NULL_RTX;
831 /* Try to simplify a unary operation CODE whose output mode is to be
832 MODE with input operand OP whose mode was originally OP_MODE.
833 Return zero if no simplification can be made. */
835 simplify_unary_operation (enum rtx_code code, machine_mode mode,
836 rtx op, machine_mode op_mode)
838 rtx trueop, tem;
840 trueop = avoid_constant_pool_reference (op);
842 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
843 if (tem)
844 return tem;
846 return simplify_unary_operation_1 (code, mode, op);
849 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
850 to be exact. */
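/* For example, (float:SF (reg:HI X)) is always exact, since any 16-bit
   integer fits in SFmode's 24-bit significand, whereas (float:SF (reg:SI X))
   is exact only when enough leading sign-bit copies or trailing zero bits
   of X are known.  */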
852 static bool
853 exact_int_to_float_conversion_p (const_rtx op)
855 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
856 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
857 /* Constants shouldn't reach here. */
858 gcc_assert (op0_mode != VOIDmode);
859 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
860 int in_bits = in_prec;
861 if (HWI_COMPUTABLE_MODE_P (op0_mode))
863 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
864 if (GET_CODE (op) == FLOAT)
865 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
866 else if (GET_CODE (op) == UNSIGNED_FLOAT)
867 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
868 else
869 gcc_unreachable ();
870 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
872 return in_bits <= out_bits;
875 /* Perform some simplifications we can do even if the operands
876 aren't constant. */
877 static rtx
878 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
880 enum rtx_code reversed;
881 rtx temp;
883 switch (code)
885 case NOT:
886 /* (not (not X)) == X. */
887 if (GET_CODE (op) == NOT)
888 return XEXP (op, 0);
890 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
891 comparison is all ones. */
892 if (COMPARISON_P (op)
893 && (mode == BImode || STORE_FLAG_VALUE == -1)
894 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
895 return simplify_gen_relational (reversed, mode, VOIDmode,
896 XEXP (op, 0), XEXP (op, 1));
898 /* (not (plus X -1)) can become (neg X). */
899 if (GET_CODE (op) == PLUS
900 && XEXP (op, 1) == constm1_rtx)
901 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
903 /* Similarly, (not (neg X)) is (plus X -1). */
904 if (GET_CODE (op) == NEG)
905 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
906 CONSTM1_RTX (mode));
908 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
909 if (GET_CODE (op) == XOR
910 && CONST_INT_P (XEXP (op, 1))
911 && (temp = simplify_unary_operation (NOT, mode,
912 XEXP (op, 1), mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
915 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
916 if (GET_CODE (op) == PLUS
917 && CONST_INT_P (XEXP (op, 1))
918 && mode_signbit_p (mode, XEXP (op, 1))
919 && (temp = simplify_unary_operation (NOT, mode,
920 XEXP (op, 1), mode)) != 0)
921 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
924 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
925 operands other than 1, but that is not valid. We could do a
926 similar simplification for (not (lshiftrt C X)) where C is
927 just the sign bit, but this doesn't seem common enough to
928 bother with. */
929 if (GET_CODE (op) == ASHIFT
930 && XEXP (op, 0) == const1_rtx)
932 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
933 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
936 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
937 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
938 so we can perform the above simplification. */
939 if (STORE_FLAG_VALUE == -1
940 && GET_CODE (op) == ASHIFTRT
941 && CONST_INT_P (XEXP (op, 1))
942 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
943 return simplify_gen_relational (GE, mode, VOIDmode,
944 XEXP (op, 0), const0_rtx);
947 if (GET_CODE (op) == SUBREG
948 && subreg_lowpart_p (op)
949 && (GET_MODE_SIZE (GET_MODE (op))
950 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
951 && GET_CODE (SUBREG_REG (op)) == ASHIFT
952 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
954 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
955 rtx x;
957 x = gen_rtx_ROTATE (inner_mode,
958 simplify_gen_unary (NOT, inner_mode, const1_rtx,
959 inner_mode),
960 XEXP (SUBREG_REG (op), 1));
961 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
962 if (temp)
963 return temp;
966 /* Apply De Morgan's laws to reduce number of patterns for machines
967 with negating logical insns (and-not, nand, etc.). If result has
968 only one NOT, put it first, since that is how the patterns are
969 coded. */
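/* E.g. (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)).  */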
970 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
972 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
973 machine_mode op_mode;
975 op_mode = GET_MODE (in1);
976 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
978 op_mode = GET_MODE (in2);
979 if (op_mode == VOIDmode)
980 op_mode = mode;
981 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
983 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
984 std::swap (in1, in2);
986 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
987 mode, in1, in2);
990 /* (not (bswap x)) -> (bswap (not x)). */
991 if (GET_CODE (op) == BSWAP)
993 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
994 return simplify_gen_unary (BSWAP, mode, x, mode);
996 break;
998 case NEG:
999 /* (neg (neg X)) == X. */
1000 if (GET_CODE (op) == NEG)
1001 return XEXP (op, 0);
1003 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1004 If comparison is not reversible use
1005 x ? y : (neg y). */
1006 if (GET_CODE (op) == IF_THEN_ELSE)
1008 rtx cond = XEXP (op, 0);
1009 rtx true_rtx = XEXP (op, 1);
1010 rtx false_rtx = XEXP (op, 2);
1012 if ((GET_CODE (true_rtx) == NEG
1013 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1014 || (GET_CODE (false_rtx) == NEG
1015 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1017 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1018 temp = reversed_comparison (cond, mode);
1019 else
1021 temp = cond;
1022 std::swap (true_rtx, false_rtx);
1024 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1025 mode, temp, true_rtx, false_rtx);
1029 /* (neg (plus X 1)) can become (not X). */
1030 if (GET_CODE (op) == PLUS
1031 && XEXP (op, 1) == const1_rtx)
1032 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1034 /* Similarly, (neg (not X)) is (plus X 1). */
1035 if (GET_CODE (op) == NOT)
1036 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1037 CONST1_RTX (mode));
1039 /* (neg (minus X Y)) can become (minus Y X). This transformation
1040 isn't safe for modes with signed zeros, since if X and Y are
1041 both +0, (minus Y X) is the same as (minus X Y). If the
1042 rounding mode is towards +infinity (or -infinity) then the two
1043 expressions will be rounded differently. */
1044 if (GET_CODE (op) == MINUS
1045 && !HONOR_SIGNED_ZEROS (mode)
1046 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1047 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1049 if (GET_CODE (op) == PLUS
1050 && !HONOR_SIGNED_ZEROS (mode)
1051 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1053 /* (neg (plus A C)) is simplified to (minus -C A). */
1054 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1055 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1057 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1058 if (temp)
1059 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1062 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1063 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1064 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1067 /* (neg (mult A B)) becomes (mult A (neg B)).
1068 This works even for floating-point values. */
1069 if (GET_CODE (op) == MULT
1070 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1072 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1073 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1076 /* NEG commutes with ASHIFT since it is multiplication. Only do
1077 this if we can then eliminate the NEG (e.g., if the operand
1078 is a constant). */
1079 if (GET_CODE (op) == ASHIFT)
1081 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1082 if (temp)
1083 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1086 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1087 C is equal to the width of MODE minus 1. */
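/* E.g. in SImode with C == 31, (ashiftrt X 31) is 0 or -1 according to the
   sign bit, so its negation is the same 0-or-1 value as (lshiftrt X 31).  */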
1088 if (GET_CODE (op) == ASHIFTRT
1089 && CONST_INT_P (XEXP (op, 1))
1090 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1091 return simplify_gen_binary (LSHIFTRT, mode,
1092 XEXP (op, 0), XEXP (op, 1));
1094 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1095 C is equal to the width of MODE minus 1. */
1096 if (GET_CODE (op) == LSHIFTRT
1097 && CONST_INT_P (XEXP (op, 1))
1098 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1099 return simplify_gen_binary (ASHIFTRT, mode,
1100 XEXP (op, 0), XEXP (op, 1));
1102 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1103 if (GET_CODE (op) == XOR
1104 && XEXP (op, 1) == const1_rtx
1105 && nonzero_bits (XEXP (op, 0), mode) == 1)
1106 return plus_constant (mode, XEXP (op, 0), -1);
1108 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1109 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1110 if (GET_CODE (op) == LT
1111 && XEXP (op, 1) == const0_rtx
1112 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1114 machine_mode inner = GET_MODE (XEXP (op, 0));
1115 int isize = GET_MODE_PRECISION (inner);
1116 if (STORE_FLAG_VALUE == 1)
1118 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1119 GEN_INT (isize - 1));
1120 if (mode == inner)
1121 return temp;
1122 if (GET_MODE_PRECISION (mode) > isize)
1123 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1124 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1126 else if (STORE_FLAG_VALUE == -1)
1128 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1129 GEN_INT (isize - 1));
1130 if (mode == inner)
1131 return temp;
1132 if (GET_MODE_PRECISION (mode) > isize)
1133 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1134 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1137 break;
1139 case TRUNCATE:
1140 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1141 with the umulXi3_highpart patterns. */
1142 if (GET_CODE (op) == LSHIFTRT
1143 && GET_CODE (XEXP (op, 0)) == MULT)
1144 break;
1146 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1148 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1150 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1151 if (temp)
1152 return temp;
1154 /* We can't handle truncation to a partial integer mode here
1155 because we don't know the real bitsize of the partial
1156 integer mode. */
1157 break;
1160 if (GET_MODE (op) != VOIDmode)
1162 temp = simplify_truncation (mode, op, GET_MODE (op));
1163 if (temp)
1164 return temp;
1167 /* If we know that the value is already truncated, we can
1168 replace the TRUNCATE with a SUBREG. */
1169 if (GET_MODE_NUNITS (mode) == 1
1170 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1171 || truncated_to_mode (mode, op)))
1173 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1174 if (temp)
1175 return temp;
1178 /* A truncate of a comparison can be replaced with a subreg if
1179 STORE_FLAG_VALUE permits. This is like the previous test,
1180 but it works even if the comparison is done in a mode larger
1181 than HOST_BITS_PER_WIDE_INT. */
1182 if (HWI_COMPUTABLE_MODE_P (mode)
1183 && COMPARISON_P (op)
1184 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1186 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1187 if (temp)
1188 return temp;
1191 /* A truncate of a memory is just loading the low part of the memory
1192 if we are not changing the meaning of the address. */
1193 if (GET_CODE (op) == MEM
1194 && !VECTOR_MODE_P (mode)
1195 && !MEM_VOLATILE_P (op)
1196 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1198 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1199 if (temp)
1200 return temp;
1203 break;
1205 case FLOAT_TRUNCATE:
1206 if (DECIMAL_FLOAT_MODE_P (mode))
1207 break;
1209 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1210 if (GET_CODE (op) == FLOAT_EXTEND
1211 && GET_MODE (XEXP (op, 0)) == mode)
1212 return XEXP (op, 0);
1214 /* (float_truncate:SF (float_truncate:DF foo:XF))
1215 = (float_truncate:SF foo:XF).
1216 This may eliminate double rounding, so it is unsafe.
1218 (float_truncate:SF (float_extend:XF foo:DF))
1219 = (float_truncate:SF foo:DF).
1221 (float_truncate:DF (float_extend:XF foo:SF))
1222 = (float_extend:DF foo:SF). */
1223 if ((GET_CODE (op) == FLOAT_TRUNCATE
1224 && flag_unsafe_math_optimizations)
1225 || GET_CODE (op) == FLOAT_EXTEND)
1226 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1227 0)))
1228 > GET_MODE_SIZE (mode)
1229 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1230 mode,
1231 XEXP (op, 0), mode);
1233 /* (float_truncate (float x)) is (float x) */
1234 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1235 && (flag_unsafe_math_optimizations
1236 || exact_int_to_float_conversion_p (op)))
1237 return simplify_gen_unary (GET_CODE (op), mode,
1238 XEXP (op, 0),
1239 GET_MODE (XEXP (op, 0)));
1241 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1242 (OP:SF foo:SF) if OP is NEG or ABS. */
1243 if ((GET_CODE (op) == ABS
1244 || GET_CODE (op) == NEG)
1245 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1246 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1247 return simplify_gen_unary (GET_CODE (op), mode,
1248 XEXP (XEXP (op, 0), 0), mode);
1250 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1251 is (float_truncate:SF x). */
1252 if (GET_CODE (op) == SUBREG
1253 && subreg_lowpart_p (op)
1254 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1255 return SUBREG_REG (op);
1256 break;
1258 case FLOAT_EXTEND:
1259 if (DECIMAL_FLOAT_MODE_P (mode))
1260 break;
1262 /* (float_extend (float_extend x)) is (float_extend x)
1264 (float_extend (float x)) is (float x) assuming that double
 1265 rounding can't happen.  */
1267 if (GET_CODE (op) == FLOAT_EXTEND
1268 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1269 && exact_int_to_float_conversion_p (op)))
1270 return simplify_gen_unary (GET_CODE (op), mode,
1271 XEXP (op, 0),
1272 GET_MODE (XEXP (op, 0)));
1274 break;
1276 case ABS:
1277 /* (abs (neg <foo>)) -> (abs <foo>) */
1278 if (GET_CODE (op) == NEG)
1279 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1280 GET_MODE (XEXP (op, 0)));
1282 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1283 do nothing. */
1284 if (GET_MODE (op) == VOIDmode)
1285 break;
1287 /* If operand is something known to be positive, ignore the ABS. */
1288 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1289 || val_signbit_known_clear_p (GET_MODE (op),
1290 nonzero_bits (op, GET_MODE (op))))
1291 return op;
1293 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1294 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1295 return gen_rtx_NEG (mode, op);
1297 break;
1299 case FFS:
1300 /* (ffs (*_extend <X>)) = (ffs <X>) */
1301 if (GET_CODE (op) == SIGN_EXTEND
1302 || GET_CODE (op) == ZERO_EXTEND)
1303 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1304 GET_MODE (XEXP (op, 0)));
1305 break;
1307 case POPCOUNT:
1308 switch (GET_CODE (op))
1310 case BSWAP:
1311 case ZERO_EXTEND:
1312 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1313 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1316 case ROTATE:
1317 case ROTATERT:
1318 /* Rotations don't affect popcount. */
1319 if (!side_effects_p (XEXP (op, 1)))
1320 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1321 GET_MODE (XEXP (op, 0)));
1322 break;
1324 default:
1325 break;
1327 break;
1329 case PARITY:
1330 switch (GET_CODE (op))
1332 case NOT:
1333 case BSWAP:
1334 case ZERO_EXTEND:
1335 case SIGN_EXTEND:
1336 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1337 GET_MODE (XEXP (op, 0)));
1339 case ROTATE:
1340 case ROTATERT:
1341 /* Rotations don't affect parity. */
1342 if (!side_effects_p (XEXP (op, 1)))
1343 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1344 GET_MODE (XEXP (op, 0)));
1345 break;
1347 default:
1348 break;
1350 break;
1352 case BSWAP:
1353 /* (bswap (bswap x)) -> x. */
1354 if (GET_CODE (op) == BSWAP)
1355 return XEXP (op, 0);
1356 break;
1358 case FLOAT:
1359 /* (float (sign_extend <X>)) = (float <X>). */
1360 if (GET_CODE (op) == SIGN_EXTEND)
1361 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1362 GET_MODE (XEXP (op, 0)));
1363 break;
1365 case SIGN_EXTEND:
1366 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1367 becomes just the MINUS if its mode is MODE. This allows
1368 folding switch statements on machines using casesi (such as
1369 the VAX). */
1370 if (GET_CODE (op) == TRUNCATE
1371 && GET_MODE (XEXP (op, 0)) == mode
1372 && GET_CODE (XEXP (op, 0)) == MINUS
1373 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1374 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1375 return XEXP (op, 0);
1377 /* Extending a widening multiplication should be canonicalized to
1378 a wider widening multiplication. */
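/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
   (sign_extend:SI (reg:HI B)))) is rewritten as
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))),
   which matches the wider widening-multiply patterns directly.  */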
1379 if (GET_CODE (op) == MULT)
1381 rtx lhs = XEXP (op, 0);
1382 rtx rhs = XEXP (op, 1);
1383 enum rtx_code lcode = GET_CODE (lhs);
1384 enum rtx_code rcode = GET_CODE (rhs);
1386 /* Widening multiplies usually extend both operands, but sometimes
1387 they use a shift to extract a portion of a register. */
1388 if ((lcode == SIGN_EXTEND
1389 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1390 && (rcode == SIGN_EXTEND
1391 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1393 machine_mode lmode = GET_MODE (lhs);
1394 machine_mode rmode = GET_MODE (rhs);
1395 int bits;
1397 if (lcode == ASHIFTRT)
1398 /* Number of bits not shifted off the end. */
1399 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1400 else /* lcode == SIGN_EXTEND */
1401 /* Size of inner mode. */
1402 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1404 if (rcode == ASHIFTRT)
1405 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1406 else /* rcode == SIGN_EXTEND */
1407 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1409 /* We can only widen multiplies if the result is mathematically
1410 equivalent. I.e. if overflow was impossible. */
1411 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1412 return simplify_gen_binary
1413 (MULT, mode,
1414 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1415 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1419 /* Check for a sign extension of a subreg of a promoted
1420 variable, where the promotion is sign-extended, and the
1421 target mode is the same as the variable's promotion. */
1422 if (GET_CODE (op) == SUBREG
1423 && SUBREG_PROMOTED_VAR_P (op)
1424 && SUBREG_PROMOTED_SIGNED_P (op)
1425 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1427 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1428 if (temp)
1429 return temp;
1432 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1433 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1434 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1436 gcc_assert (GET_MODE_PRECISION (mode)
1437 > GET_MODE_PRECISION (GET_MODE (op)));
1438 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1439 GET_MODE (XEXP (op, 0)));
1442 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1443 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1444 GET_MODE_BITSIZE (N) - I bits.
1445 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1446 is similarly (zero_extend:M (subreg:O <X>)). */
1447 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1448 && GET_CODE (XEXP (op, 0)) == ASHIFT
1449 && CONST_INT_P (XEXP (op, 1))
1450 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1451 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1453 machine_mode tmode
1454 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1455 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1456 gcc_assert (GET_MODE_BITSIZE (mode)
1457 > GET_MODE_BITSIZE (GET_MODE (op)));
1458 if (tmode != BLKmode)
1460 rtx inner =
1461 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1462 if (inner)
1463 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1464 ? SIGN_EXTEND : ZERO_EXTEND,
1465 mode, inner, tmode);
1469 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1470 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1471 if (GET_CODE (op) == LSHIFTRT
1472 && CONST_INT_P (XEXP (op, 1))
1473 && XEXP (op, 1) != const0_rtx)
1474 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1476 #if defined(POINTERS_EXTEND_UNSIGNED)
1477 /* As we do not know which address space the pointer is referring to,
1478 we can do this only if the target does not support different pointer
1479 or address modes depending on the address space. */
1480 if (target_default_pointer_address_modes_p ()
1481 && ! POINTERS_EXTEND_UNSIGNED
1482 && mode == Pmode && GET_MODE (op) == ptr_mode
1483 && (CONSTANT_P (op)
1484 || (GET_CODE (op) == SUBREG
1485 && REG_P (SUBREG_REG (op))
1486 && REG_POINTER (SUBREG_REG (op))
1487 && GET_MODE (SUBREG_REG (op)) == Pmode))
1488 && !targetm.have_ptr_extend ())
1490 temp
1491 = convert_memory_address_addr_space_1 (Pmode, op,
1492 ADDR_SPACE_GENERIC, false,
1493 true);
1494 if (temp)
1495 return temp;
1497 #endif
1498 break;
1500 case ZERO_EXTEND:
1501 /* Check for a zero extension of a subreg of a promoted
1502 variable, where the promotion is zero-extended, and the
1503 target mode is the same as the variable's promotion. */
1504 if (GET_CODE (op) == SUBREG
1505 && SUBREG_PROMOTED_VAR_P (op)
1506 && SUBREG_PROMOTED_UNSIGNED_P (op)
1507 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1509 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1510 if (temp)
1511 return temp;
1514 /* Extending a widening multiplication should be canonicalized to
1515 a wider widening multiplication. */
1516 if (GET_CODE (op) == MULT)
1518 rtx lhs = XEXP (op, 0);
1519 rtx rhs = XEXP (op, 1);
1520 enum rtx_code lcode = GET_CODE (lhs);
1521 enum rtx_code rcode = GET_CODE (rhs);
1523 /* Widening multiplies usually extend both operands, but sometimes
1524 they use a shift to extract a portion of a register. */
1525 if ((lcode == ZERO_EXTEND
1526 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1527 && (rcode == ZERO_EXTEND
1528 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1530 machine_mode lmode = GET_MODE (lhs);
1531 machine_mode rmode = GET_MODE (rhs);
1532 int bits;
1534 if (lcode == LSHIFTRT)
1535 /* Number of bits not shifted off the end. */
1536 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1537 else /* lcode == ZERO_EXTEND */
1538 /* Size of inner mode. */
1539 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1541 if (rcode == LSHIFTRT)
1542 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1543 else /* rcode == ZERO_EXTEND */
1544 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1546 /* We can only widen multiplies if the result is mathematically
1547 equivalent. I.e. if overflow was impossible. */
1548 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1549 return simplify_gen_binary
1550 (MULT, mode,
1551 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1552 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1556 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1557 if (GET_CODE (op) == ZERO_EXTEND)
1558 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1559 GET_MODE (XEXP (op, 0)));
1561 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1562 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1563 GET_MODE_PRECISION (N) - I bits. */
1564 if (GET_CODE (op) == LSHIFTRT
1565 && GET_CODE (XEXP (op, 0)) == ASHIFT
1566 && CONST_INT_P (XEXP (op, 1))
1567 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1568 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1570 machine_mode tmode
1571 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1572 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1573 if (tmode != BLKmode)
1575 rtx inner =
1576 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1577 if (inner)
1578 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1582 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1583 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1584 of mode N. E.g.
1585 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1586 (and:SI (reg:SI) (const_int 63)). */
1587 if (GET_CODE (op) == SUBREG
1588 && GET_MODE_PRECISION (GET_MODE (op))
1589 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1590 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1591 <= HOST_BITS_PER_WIDE_INT
1592 && GET_MODE_PRECISION (mode)
1593 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1594 && subreg_lowpart_p (op)
1595 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1596 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1598 if (GET_MODE_PRECISION (mode)
1599 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1600 return SUBREG_REG (op);
1601 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1602 GET_MODE (SUBREG_REG (op)));
1605 #if defined(POINTERS_EXTEND_UNSIGNED)
1606 /* As we do not know which address space the pointer is referring to,
1607 we can do this only if the target does not support different pointer
1608 or address modes depending on the address space. */
1609 if (target_default_pointer_address_modes_p ()
1610 && POINTERS_EXTEND_UNSIGNED > 0
1611 && mode == Pmode && GET_MODE (op) == ptr_mode
1612 && (CONSTANT_P (op)
1613 || (GET_CODE (op) == SUBREG
1614 && REG_P (SUBREG_REG (op))
1615 && REG_POINTER (SUBREG_REG (op))
1616 && GET_MODE (SUBREG_REG (op)) == Pmode))
1617 && !targetm.have_ptr_extend ())
1619 temp
1620 = convert_memory_address_addr_space_1 (Pmode, op,
1621 ADDR_SPACE_GENERIC, false,
1622 true);
1623 if (temp)
1624 return temp;
1626 #endif
1627 break;
1629 default:
1630 break;
1633 return 0;
1636 /* Try to compute the value of a unary operation CODE whose output mode is to
1637 be MODE with input operand OP whose mode was originally OP_MODE.
1638 Return zero if the value cannot be computed. */
1640 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1641 rtx op, machine_mode op_mode)
1643 unsigned int width = GET_MODE_PRECISION (mode);
1645 if (code == VEC_DUPLICATE)
1647 gcc_assert (VECTOR_MODE_P (mode));
1648 if (GET_MODE (op) != VOIDmode)
1650 if (!VECTOR_MODE_P (GET_MODE (op)))
1651 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1652 else
1653 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1654 (GET_MODE (op)));
1656 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1657 || GET_CODE (op) == CONST_VECTOR)
1659 int elt_size = GET_MODE_UNIT_SIZE (mode);
1660 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1661 rtvec v = rtvec_alloc (n_elts);
1662 unsigned int i;
1664 if (GET_CODE (op) != CONST_VECTOR)
1665 for (i = 0; i < n_elts; i++)
1666 RTVEC_ELT (v, i) = op;
1667 else
1669 machine_mode inmode = GET_MODE (op);
1670 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1671 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1673 gcc_assert (in_n_elts < n_elts);
1674 gcc_assert ((n_elts % in_n_elts) == 0);
1675 for (i = 0; i < n_elts; i++)
1676 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1678 return gen_rtx_CONST_VECTOR (mode, v);
1682 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1684 int elt_size = GET_MODE_UNIT_SIZE (mode);
1685 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1686 machine_mode opmode = GET_MODE (op);
1687 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1688 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1689 rtvec v = rtvec_alloc (n_elts);
1690 unsigned int i;
1692 gcc_assert (op_n_elts == n_elts);
1693 for (i = 0; i < n_elts; i++)
1695 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1696 CONST_VECTOR_ELT (op, i),
1697 GET_MODE_INNER (opmode));
1698 if (!x)
1699 return 0;
1700 RTVEC_ELT (v, i) = x;
1702 return gen_rtx_CONST_VECTOR (mode, v);
1705 /* The order of these tests is critical so that, for example, we don't
1706 check the wrong mode (input vs. output) for a conversion operation,
1707 such as FIX. At some point, this should be simplified. */
1709 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1711 REAL_VALUE_TYPE d;
1713 if (op_mode == VOIDmode)
 1715 /* CONST_INTs have VOIDmode as their mode.  We assume that all
 1716 the bits of the constant are significant, though this is
 1717 a dangerous assumption, as CONST_INTs are often
 1718 created and used with garbage in the bits outside of the
1719 precision of the implied mode of the const_int. */
1720 op_mode = MAX_MODE_INT;
1723 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1725 /* Avoid the folding if flag_signaling_nans is on and
1726 operand is a signaling NaN. */
1727 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1728 return 0;
1730 d = real_value_truncate (mode, d);
1731 return const_double_from_real_value (d, mode);
1733 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1735 REAL_VALUE_TYPE d;
1737 if (op_mode == VOIDmode)
 1739 /* CONST_INTs have VOIDmode as their mode.  We assume that all
 1740 the bits of the constant are significant, though this is
 1741 a dangerous assumption, as CONST_INTs are often
 1742 created and used with garbage in the bits outside of the
1743 precision of the implied mode of the const_int. */
1744 op_mode = MAX_MODE_INT;
1747 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1749 /* Avoid the folding if flag_signaling_nans is on and
1750 operand is a signaling NaN. */
1751 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1752 return 0;
1754 d = real_value_truncate (mode, d);
1755 return const_double_from_real_value (d, mode);
1758 if (CONST_SCALAR_INT_P (op) && width > 0)
1760 wide_int result;
1761 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1762 rtx_mode_t op0 = std::make_pair (op, imode);
1763 int int_value;
1765 #if TARGET_SUPPORTS_WIDE_INT == 0
1766 /* This assert keeps the simplification from producing a result
1767 that cannot be represented in a CONST_DOUBLE but a lot of
1768 upstream callers expect that this function never fails to
 1769 simplify something, and so if you added this to the test
 1770 above, the code would die later anyway.  If this assert
1771 happens, you just need to make the port support wide int. */
1772 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1773 #endif
1775 switch (code)
1777 case NOT:
1778 result = wi::bit_not (op0);
1779 break;
1781 case NEG:
1782 result = wi::neg (op0);
1783 break;
1785 case ABS:
1786 result = wi::abs (op0);
1787 break;
1789 case FFS:
1790 result = wi::shwi (wi::ffs (op0), mode);
1791 break;
1793 case CLZ:
1794 if (wi::ne_p (op0, 0))
1795 int_value = wi::clz (op0);
1796 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1797 int_value = GET_MODE_PRECISION (mode);
1798 result = wi::shwi (int_value, mode);
1799 break;
1801 case CLRSB:
1802 result = wi::shwi (wi::clrsb (op0), mode);
1803 break;
1805 case CTZ:
1806 if (wi::ne_p (op0, 0))
1807 int_value = wi::ctz (op0);
1808 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1809 int_value = GET_MODE_PRECISION (mode);
1810 result = wi::shwi (int_value, mode);
1811 break;
1813 case POPCOUNT:
1814 result = wi::shwi (wi::popcount (op0), mode);
1815 break;
1817 case PARITY:
1818 result = wi::shwi (wi::parity (op0), mode);
1819 break;
1821 case BSWAP:
1822 result = wide_int (op0).bswap ();
1823 break;
1825 case TRUNCATE:
1826 case ZERO_EXTEND:
1827 result = wide_int::from (op0, width, UNSIGNED);
1828 break;
1830 case SIGN_EXTEND:
1831 result = wide_int::from (op0, width, SIGNED);
1832 break;
1834 case SQRT:
1835 default:
1836 return 0;
1839 return immed_wide_int_const (result, mode);
1842 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1843 && SCALAR_FLOAT_MODE_P (mode)
1844 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1846 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1847 switch (code)
1849 case SQRT:
1850 return 0;
1851 case ABS:
1852 d = real_value_abs (&d);
1853 break;
1854 case NEG:
1855 d = real_value_negate (&d);
1856 break;
1857 case FLOAT_TRUNCATE:
1858 /* Don't perform the operation if flag_signaling_nans is on
1859 and the operand is a signaling NaN. */
1860 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1861 d = real_value_truncate (mode, d);
1862 break;
1863 case FLOAT_EXTEND:
1864 /* All this does is change the mode, unless we are changing
1865 the mode class. */
1866 /* Don't perform the operation if flag_signaling_nans is on
1867 and the operand is a signaling NaN. */
1868 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1869 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1870 real_convert (&d, mode, &d);
1871 break;
1872 case FIX:
1873 /* Don't perform the operation if flag_signaling_nans is on
1874 and the operand is a signaling NaN. */
1875 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1876 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1877 break;
1878 case NOT:
1880 long tmp[4];
1881 int i;
1883 real_to_target (tmp, &d, GET_MODE (op));
1884 for (i = 0; i < 4; i++)
1885 tmp[i] = ~tmp[i];
1886 real_from_target (&d, tmp, mode);
1887 break;
1889 default:
1890 gcc_unreachable ();
1892 return const_double_from_real_value (d, mode);
1894 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1895 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1896 && GET_MODE_CLASS (mode) == MODE_INT
1897 && width > 0)
1899 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1900 operators are intentionally left unspecified (to ease implementation
1901 by target backends), for consistency, this routine implements the
1902 same semantics for constant folding as used by the middle-end. */
1904 /* This was formerly used only for non-IEEE float.
1905 eggert@twinsun.com says it is safe for IEEE also. */
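/* For instance, converting the SFmode constant 300.0 to QImode with FIX
yields (const_int 127), the signed QImode maximum, while a NaN operand
yields (const_int 0), matching the range checks below. */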
1906 REAL_VALUE_TYPE t;
1907 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1908 wide_int wmax, wmin;
1909 /* This is part of the ABI of real_to_integer, but we check
1910 things before making this call. */
1911 bool fail;
1913 switch (code)
1915 case FIX:
1916 if (REAL_VALUE_ISNAN (*x))
1917 return const0_rtx;
1919 /* Test against the signed upper bound. */
1920 wmax = wi::max_value (width, SIGNED);
1921 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1922 if (real_less (&t, x))
1923 return immed_wide_int_const (wmax, mode);
1925 /* Test against the signed lower bound. */
1926 wmin = wi::min_value (width, SIGNED);
1927 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1928 if (real_less (x, &t))
1929 return immed_wide_int_const (wmin, mode);
1931 return immed_wide_int_const (real_to_integer (x, &fail, width),
1932 mode);
1934 case UNSIGNED_FIX:
1935 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1936 return const0_rtx;
1938 /* Test against the unsigned upper bound. */
1939 wmax = wi::max_value (width, UNSIGNED);
1940 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1941 if (real_less (&t, x))
1942 return immed_wide_int_const (wmax, mode);
1944 return immed_wide_int_const (real_to_integer (x, &fail, width),
1945 mode);
1947 default:
1948 gcc_unreachable ();
1952 return NULL_RTX;
1955 /* Subroutine of simplify_binary_operation to simplify a binary operation
1956 CODE that can commute with byte swapping, with result mode MODE and
1957 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1958 Return zero if no simplification or canonicalization is possible. */
1960 static rtx
1961 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1962 rtx op0, rtx op1)
1964 rtx tem;
1966 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
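/* For instance, in SImode (and (bswap x) (const_int 0xff)) becomes
(bswap (and x C)), where C is the byte-swapped constant 0xff000000. */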
1967 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1969 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1970 simplify_gen_unary (BSWAP, mode, op1, mode));
1971 return simplify_gen_unary (BSWAP, mode, tem, mode);
1974 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1975 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1977 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1978 return simplify_gen_unary (BSWAP, mode, tem, mode);
1981 return NULL_RTX;
1984 /* Subroutine of simplify_binary_operation to simplify a commutative,
1985 associative binary operation CODE with result mode MODE, operating
1986 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1987 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1988 canonicalization is possible. */
1990 static rtx
1991 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1992 rtx op0, rtx op1)
1994 rtx tem;
1996 /* Linearize the operator to the left. */
1997 if (GET_CODE (op1) == code)
1999 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2000 if (GET_CODE (op0) == code)
2002 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2003 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2006 /* "a op (b op c)" becomes "(b op c) op a". */
2007 if (! swap_commutative_operands_p (op1, op0))
2008 return simplify_gen_binary (code, mode, op1, op0);
2010 std::swap (op0, op1);
2013 if (GET_CODE (op0) == code)
2015 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
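/* E.g. (plus (plus x (const_int 4)) y) is rewritten as
(plus (plus x y) (const_int 4)), keeping the constant outermost. */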
2016 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2018 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2019 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2022 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2023 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2024 if (tem != 0)
2025 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2027 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2028 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2029 if (tem != 0)
2030 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2033 return 0;
2037 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2038 and OP1. Return 0 if no simplification is possible.
2040 Don't use this for relational operations such as EQ or LT.
2041 Use simplify_relational_operation instead. */
2043 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2044 rtx op0, rtx op1)
2046 rtx trueop0, trueop1;
2047 rtx tem;
2049 /* Relational operations don't work here. We must know the mode
2050 of the operands in order to do the comparison correctly.
2051 Assuming a full word can give incorrect results.
2052 Consider comparing 128 with -128 in QImode. */
2053 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2054 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2056 /* Make sure the constant is second. */
2057 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2058 && swap_commutative_operands_p (op0, op1))
2059 std::swap (op0, op1);
2061 trueop0 = avoid_constant_pool_reference (op0);
2062 trueop1 = avoid_constant_pool_reference (op1);
2064 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2065 if (tem)
2066 return tem;
2067 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2069 if (tem)
2070 return tem;
2072 /* If the above steps did not result in a simplification and op0 or op1
2073 were constant pool references, use the referenced constants directly. */
2074 if (trueop0 != op0 || trueop1 != op1)
2075 return simplify_gen_binary (code, mode, trueop0, trueop1);
2077 return NULL_RTX;
2080 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2081 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2082 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2083 actual constants. */
2085 static rtx
2086 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2087 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2089 rtx tem, reversed, opleft, opright;
2090 HOST_WIDE_INT val;
2091 unsigned int width = GET_MODE_PRECISION (mode);
2093 /* Even if we can't compute a constant result,
2094 there are some cases worth simplifying. */
2096 switch (code)
2098 case PLUS:
2099 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2100 when x is NaN, infinite, or finite and nonzero. They aren't
2101 when x is -0 and the rounding mode is not towards -infinity,
2102 since (-0) + 0 is then 0. */
2103 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2104 return op0;
2106 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2107 transformations are safe even for IEEE. */
2108 if (GET_CODE (op0) == NEG)
2109 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2110 else if (GET_CODE (op1) == NEG)
2111 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2113 /* (~a) + 1 -> -a */
2114 if (INTEGRAL_MODE_P (mode)
2115 && GET_CODE (op0) == NOT
2116 && trueop1 == const1_rtx)
2117 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2119 /* Handle both-operands-constant cases. We can only add
2120 CONST_INTs to constants since the sum of relocatable symbols
2121 can't be handled by most assemblers. Don't add CONST_INT
2122 to CONST_INT since overflow won't be computed properly if wider
2123 than HOST_BITS_PER_WIDE_INT. */
2125 if ((GET_CODE (op0) == CONST
2126 || GET_CODE (op0) == SYMBOL_REF
2127 || GET_CODE (op0) == LABEL_REF)
2128 && CONST_INT_P (op1))
2129 return plus_constant (mode, op0, INTVAL (op1));
2130 else if ((GET_CODE (op1) == CONST
2131 || GET_CODE (op1) == SYMBOL_REF
2132 || GET_CODE (op1) == LABEL_REF)
2133 && CONST_INT_P (op0))
2134 return plus_constant (mode, op1, INTVAL (op0));
2136 /* See if this is something like X * C - X or vice versa or
2137 if the multiplication is written as a shift. If so, we can
2138 distribute and make a new multiply, shift, or maybe just
2139 have X (if C is 2 in the example above). But don't make
2140 something more expensive than we had before. */
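/* For example, (plus (mult x (const_int 3)) x) can become
(mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) can become
(mult x (const_int 5)), provided the result is no more expensive than
the original according to set_src_cost. */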
2142 if (SCALAR_INT_MODE_P (mode))
2144 rtx lhs = op0, rhs = op1;
2146 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2147 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2149 if (GET_CODE (lhs) == NEG)
2151 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2152 lhs = XEXP (lhs, 0);
2154 else if (GET_CODE (lhs) == MULT
2155 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2157 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2158 lhs = XEXP (lhs, 0);
2160 else if (GET_CODE (lhs) == ASHIFT
2161 && CONST_INT_P (XEXP (lhs, 1))
2162 && INTVAL (XEXP (lhs, 1)) >= 0
2163 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2165 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2166 GET_MODE_PRECISION (mode));
2167 lhs = XEXP (lhs, 0);
2170 if (GET_CODE (rhs) == NEG)
2172 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2173 rhs = XEXP (rhs, 0);
2175 else if (GET_CODE (rhs) == MULT
2176 && CONST_INT_P (XEXP (rhs, 1)))
2178 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2179 rhs = XEXP (rhs, 0);
2181 else if (GET_CODE (rhs) == ASHIFT
2182 && CONST_INT_P (XEXP (rhs, 1))
2183 && INTVAL (XEXP (rhs, 1)) >= 0
2184 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2186 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2187 GET_MODE_PRECISION (mode));
2188 rhs = XEXP (rhs, 0);
2191 if (rtx_equal_p (lhs, rhs))
2193 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2194 rtx coeff;
2195 bool speed = optimize_function_for_speed_p (cfun);
2197 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2199 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2200 return (set_src_cost (tem, mode, speed)
2201 <= set_src_cost (orig, mode, speed) ? tem : 0);
2205 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
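/* In QImode, for example, (plus (xor x (const_int 0x40)) (const_int 0x80))
becomes (xor x (const_int 0xc0)): adding the sign bit is the same as
flipping it. */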
2206 if (CONST_SCALAR_INT_P (op1)
2207 && GET_CODE (op0) == XOR
2208 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2209 && mode_signbit_p (mode, op1))
2210 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2211 simplify_gen_binary (XOR, mode, op1,
2212 XEXP (op0, 1)));
2214 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2215 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2216 && GET_CODE (op0) == MULT
2217 && GET_CODE (XEXP (op0, 0)) == NEG)
2219 rtx in1, in2;
2221 in1 = XEXP (XEXP (op0, 0), 0);
2222 in2 = XEXP (op0, 1);
2223 return simplify_gen_binary (MINUS, mode, op1,
2224 simplify_gen_binary (MULT, mode,
2225 in1, in2));
2228 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2229 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2230 is 1. */
2231 if (COMPARISON_P (op0)
2232 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2233 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2234 && (reversed = reversed_comparison (op0, mode)))
2235 return
2236 simplify_gen_unary (NEG, mode, reversed, mode);
2238 /* If one of the operands is a PLUS or a MINUS, see if we can
2239 simplify this by the associative law.
2240 Don't use the associative law for floating point.
2241 The inaccuracy makes it nonassociative,
2242 and subtle programs can break if operations are associated. */
2244 if (INTEGRAL_MODE_P (mode)
2245 && (plus_minus_operand_p (op0)
2246 || plus_minus_operand_p (op1))
2247 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2248 return tem;
2250 /* Reassociate floating point addition only when the user
2251 specifies associative math operations. */
2252 if (FLOAT_MODE_P (mode)
2253 && flag_associative_math)
2255 tem = simplify_associative_operation (code, mode, op0, op1);
2256 if (tem)
2257 return tem;
2259 break;
2261 case COMPARE:
2262 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2263 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2264 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2265 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2267 rtx xop00 = XEXP (op0, 0);
2268 rtx xop10 = XEXP (op1, 0);
2270 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2271 return xop00;
2273 if (REG_P (xop00) && REG_P (xop10)
2274 && GET_MODE (xop00) == GET_MODE (xop10)
2275 && REGNO (xop00) == REGNO (xop10)
2276 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2277 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2278 return xop00;
2280 break;
2282 case MINUS:
2283 /* We can't assume x-x is 0 even with non-IEEE floating point,
2284 but since it is zero except in very strange circumstances, we
2285 will treat it as zero with -ffinite-math-only. */
2286 if (rtx_equal_p (trueop0, trueop1)
2287 && ! side_effects_p (op0)
2288 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2289 return CONST0_RTX (mode);
2291 /* Change subtraction from zero into negation. (0 - x) is the
2292 same as -x when x is NaN, infinite, or finite and nonzero.
2293 But if the mode has signed zeros, and does not round towards
2294 -infinity, then 0 - 0 is 0, not -0. */
2295 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2296 return simplify_gen_unary (NEG, mode, op1, mode);
2298 /* (-1 - a) is ~a, unless the expression contains symbolic
2299 constants, in which case not retaining additions and
2300 subtractions could cause invalid assembly to be produced. */
2301 if (trueop0 == constm1_rtx
2302 && !contains_symbolic_reference_p (op1))
2303 return simplify_gen_unary (NOT, mode, op1, mode);
2305 /* Subtracting 0 has no effect unless the mode has signed zeros
2306 and supports rounding towards -infinity. In such a case,
2307 0 - 0 is -0. */
2308 if (!(HONOR_SIGNED_ZEROS (mode)
2309 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2310 && trueop1 == CONST0_RTX (mode))
2311 return op0;
2313 /* See if this is something like X * C - X or vice versa or
2314 if the multiplication is written as a shift. If so, we can
2315 distribute and make a new multiply, shift, or maybe just
2316 have X (if C is 2 in the example above). But don't make
2317 something more expensive than we had before. */
2319 if (SCALAR_INT_MODE_P (mode))
2321 rtx lhs = op0, rhs = op1;
2323 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2324 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2326 if (GET_CODE (lhs) == NEG)
2328 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2329 lhs = XEXP (lhs, 0);
2331 else if (GET_CODE (lhs) == MULT
2332 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2334 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2335 lhs = XEXP (lhs, 0);
2337 else if (GET_CODE (lhs) == ASHIFT
2338 && CONST_INT_P (XEXP (lhs, 1))
2339 && INTVAL (XEXP (lhs, 1)) >= 0
2340 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2342 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2343 GET_MODE_PRECISION (mode));
2344 lhs = XEXP (lhs, 0);
2347 if (GET_CODE (rhs) == NEG)
2349 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2350 rhs = XEXP (rhs, 0);
2352 else if (GET_CODE (rhs) == MULT
2353 && CONST_INT_P (XEXP (rhs, 1)))
2355 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2356 rhs = XEXP (rhs, 0);
2358 else if (GET_CODE (rhs) == ASHIFT
2359 && CONST_INT_P (XEXP (rhs, 1))
2360 && INTVAL (XEXP (rhs, 1)) >= 0
2361 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2363 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2364 GET_MODE_PRECISION (mode));
2365 negcoeff1 = -negcoeff1;
2366 rhs = XEXP (rhs, 0);
2369 if (rtx_equal_p (lhs, rhs))
2371 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2372 rtx coeff;
2373 bool speed = optimize_function_for_speed_p (cfun);
2375 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2377 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2378 return (set_src_cost (tem, mode, speed)
2379 <= set_src_cost (orig, mode, speed) ? tem : 0);
2383 /* (a - (-b)) -> (a + b). True even for IEEE. */
2384 if (GET_CODE (op1) == NEG)
2385 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2387 /* (-x - c) may be simplified as (-c - x). */
2388 if (GET_CODE (op0) == NEG
2389 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2391 tem = simplify_unary_operation (NEG, mode, op1, mode);
2392 if (tem)
2393 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2396 /* Don't let a relocatable value get a negative coeff. */
2397 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2398 return simplify_gen_binary (PLUS, mode,
2399 op0,
2400 neg_const_int (mode, op1));
2402 /* (x - (x & y)) -> (x & ~y) */
2403 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2405 if (rtx_equal_p (op0, XEXP (op1, 0)))
2407 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2408 GET_MODE (XEXP (op1, 1)));
2409 return simplify_gen_binary (AND, mode, op0, tem);
2411 if (rtx_equal_p (op0, XEXP (op1, 1)))
2413 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2414 GET_MODE (XEXP (op1, 0)));
2415 return simplify_gen_binary (AND, mode, op0, tem);
2419 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2420 by reversing the comparison code if valid. */
2421 if (STORE_FLAG_VALUE == 1
2422 && trueop0 == const1_rtx
2423 && COMPARISON_P (op1)
2424 && (reversed = reversed_comparison (op1, mode)))
2425 return reversed;
2427 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2428 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2429 && GET_CODE (op1) == MULT
2430 && GET_CODE (XEXP (op1, 0)) == NEG)
2432 rtx in1, in2;
2434 in1 = XEXP (XEXP (op1, 0), 0);
2435 in2 = XEXP (op1, 1);
2436 return simplify_gen_binary (PLUS, mode,
2437 simplify_gen_binary (MULT, mode,
2438 in1, in2),
2439 op0);
2442 /* Canonicalize (minus (neg A) (mult B C)) to
2443 (minus (mult (neg B) C) A). */
2444 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2445 && GET_CODE (op1) == MULT
2446 && GET_CODE (op0) == NEG)
2448 rtx in1, in2;
2450 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2451 in2 = XEXP (op1, 1);
2452 return simplify_gen_binary (MINUS, mode,
2453 simplify_gen_binary (MULT, mode,
2454 in1, in2),
2455 XEXP (op0, 0));
2458 /* If one of the operands is a PLUS or a MINUS, see if we can
2459 simplify this by the associative law. This will, for example,
2460 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2461 Don't use the associative law for floating point.
2462 The inaccuracy makes it nonassociative,
2463 and subtle programs can break if operations are associated. */
2465 if (INTEGRAL_MODE_P (mode)
2466 && (plus_minus_operand_p (op0)
2467 || plus_minus_operand_p (op1))
2468 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2469 return tem;
2470 break;
2472 case MULT:
2473 if (trueop1 == constm1_rtx)
2474 return simplify_gen_unary (NEG, mode, op0, mode);
2476 if (GET_CODE (op0) == NEG)
2478 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2479 /* If op1 is a MULT as well and simplify_unary_operation
2480 just moved the NEG to the second operand, simplify_gen_binary
2481 below could, through simplify_associative_operation, move
2482 the NEG around again and recurse endlessly. */
2483 if (temp
2484 && GET_CODE (op1) == MULT
2485 && GET_CODE (temp) == MULT
2486 && XEXP (op1, 0) == XEXP (temp, 0)
2487 && GET_CODE (XEXP (temp, 1)) == NEG
2488 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2489 temp = NULL_RTX;
2490 if (temp)
2491 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2493 if (GET_CODE (op1) == NEG)
2495 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2496 /* If op0 is a MULT as well and simplify_unary_operation
2497 just moved the NEG to the second operand, simplify_gen_binary
2498 below could, through simplify_associative_operation, move
2499 the NEG around again and recurse endlessly. */
2500 if (temp
2501 && GET_CODE (op0) == MULT
2502 && GET_CODE (temp) == MULT
2503 && XEXP (op0, 0) == XEXP (temp, 0)
2504 && GET_CODE (XEXP (temp, 1)) == NEG
2505 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2506 temp = NULL_RTX;
2507 if (temp)
2508 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2511 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2512 x is NaN, since x * 0 is then also NaN. Nor is it valid
2513 when the mode has signed zeros, since multiplying a negative
2514 number by 0 will give -0, not 0. */
2515 if (!HONOR_NANS (mode)
2516 && !HONOR_SIGNED_ZEROS (mode)
2517 && trueop1 == CONST0_RTX (mode)
2518 && ! side_effects_p (op0))
2519 return op1;
2521 /* In IEEE floating point, x*1 is not equivalent to x for
2522 signaling NaNs. */
2523 if (!HONOR_SNANS (mode)
2524 && trueop1 == CONST1_RTX (mode))
2525 return op0;
2527 /* Convert multiply by constant power of two into shift. */
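/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)). */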
2528 if (CONST_SCALAR_INT_P (trueop1))
2530 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2531 if (val >= 0)
2532 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2535 /* x*2 is x+x and x*(-1) is -x */
2536 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2537 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2538 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2539 && GET_MODE (op0) == mode)
2541 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2543 if (real_equal (d1, &dconst2))
2544 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2546 if (!HONOR_SNANS (mode)
2547 && real_equal (d1, &dconstm1))
2548 return simplify_gen_unary (NEG, mode, op0, mode);
2551 /* Optimize -x * -x as x * x. */
2552 if (FLOAT_MODE_P (mode)
2553 && GET_CODE (op0) == NEG
2554 && GET_CODE (op1) == NEG
2555 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2556 && !side_effects_p (XEXP (op0, 0)))
2557 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2559 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2560 if (SCALAR_FLOAT_MODE_P (mode)
2561 && GET_CODE (op0) == ABS
2562 && GET_CODE (op1) == ABS
2563 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2564 && !side_effects_p (XEXP (op0, 0)))
2565 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2567 /* Reassociate multiplication, but for floating point MULTs
2568 only when the user specifies unsafe math optimizations. */
2569 if (! FLOAT_MODE_P (mode)
2570 || flag_unsafe_math_optimizations)
2572 tem = simplify_associative_operation (code, mode, op0, op1);
2573 if (tem)
2574 return tem;
2576 break;
2578 case IOR:
2579 if (trueop1 == CONST0_RTX (mode))
2580 return op0;
2581 if (INTEGRAL_MODE_P (mode)
2582 && trueop1 == CONSTM1_RTX (mode)
2583 && !side_effects_p (op0))
2584 return op1;
2585 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2586 return op0;
2587 /* A | (~A) -> -1 */
2588 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2589 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2590 && ! side_effects_p (op0)
2591 && SCALAR_INT_MODE_P (mode))
2592 return constm1_rtx;
2594 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2595 if (CONST_INT_P (op1)
2596 && HWI_COMPUTABLE_MODE_P (mode)
2597 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2598 && !side_effects_p (op0))
2599 return op1;
2601 /* Canonicalize (X & C1) | C2. */
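/* For example, in QImode (ior (and x (const_int 0x0f)) (const_int 0xf0))
becomes (ior x (const_int 0xf0)) since C1|C2 covers the whole mode, and
(ior (and x (const_int 0x3f)) (const_int 0x0f)) becomes
(ior (and x (const_int 0x30)) (const_int 0x0f)). */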
2602 if (GET_CODE (op0) == AND
2603 && CONST_INT_P (trueop1)
2604 && CONST_INT_P (XEXP (op0, 1)))
2606 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2607 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2608 HOST_WIDE_INT c2 = INTVAL (trueop1);
2610 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2611 if ((c1 & c2) == c1
2612 && !side_effects_p (XEXP (op0, 0)))
2613 return trueop1;
2615 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2616 if (((c1|c2) & mask) == mask)
2617 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2619 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2620 if (((c1 & ~c2) & mask) != (c1 & mask))
2622 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2623 gen_int_mode (c1 & ~c2, mode));
2624 return simplify_gen_binary (IOR, mode, tem, op1);
2628 /* Convert (A & B) | A to A. */
2629 if (GET_CODE (op0) == AND
2630 && (rtx_equal_p (XEXP (op0, 0), op1)
2631 || rtx_equal_p (XEXP (op0, 1), op1))
2632 && ! side_effects_p (XEXP (op0, 0))
2633 && ! side_effects_p (XEXP (op0, 1)))
2634 return op1;
2636 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2637 mode size to (rotate A CX). */
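/* E.g. in SImode, (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
is recognized as (rotate x (const_int 8)). */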
2639 if (GET_CODE (op1) == ASHIFT
2640 || GET_CODE (op1) == SUBREG)
2642 opleft = op1;
2643 opright = op0;
2645 else
2647 opright = op1;
2648 opleft = op0;
2651 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2652 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2653 && CONST_INT_P (XEXP (opleft, 1))
2654 && CONST_INT_P (XEXP (opright, 1))
2655 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2656 == GET_MODE_PRECISION (mode)))
2657 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2659 /* Same, but for ashift that has been "simplified" to a wider mode
2660 by simplify_shift_const. */
2662 if (GET_CODE (opleft) == SUBREG
2663 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2664 && GET_CODE (opright) == LSHIFTRT
2665 && GET_CODE (XEXP (opright, 0)) == SUBREG
2666 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2667 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2668 && (GET_MODE_SIZE (GET_MODE (opleft))
2669 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2670 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2671 SUBREG_REG (XEXP (opright, 0)))
2672 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2673 && CONST_INT_P (XEXP (opright, 1))
2674 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2675 == GET_MODE_PRECISION (mode)))
2676 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2677 XEXP (SUBREG_REG (opleft), 1));
2679 /* If we have (ior (and X C1) C2), simplify this by making
2680 C1 as small as possible if C1 actually changes. */
2681 if (CONST_INT_P (op1)
2682 && (HWI_COMPUTABLE_MODE_P (mode)
2683 || INTVAL (op1) > 0)
2684 && GET_CODE (op0) == AND
2685 && CONST_INT_P (XEXP (op0, 1))
2686 && CONST_INT_P (op1)
2687 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2689 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2690 gen_int_mode (UINTVAL (XEXP (op0, 1))
2691 & ~UINTVAL (op1),
2692 mode));
2693 return simplify_gen_binary (IOR, mode, tmp, op1);
2696 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2697 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2698 the PLUS does not affect any of the bits in OP1: then we can do
2699 the IOR as a PLUS and we can associate. This is valid if OP1
2700 can be safely shifted left C bits. */
2701 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2702 && GET_CODE (XEXP (op0, 0)) == PLUS
2703 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2704 && CONST_INT_P (XEXP (op0, 1))
2705 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2707 int count = INTVAL (XEXP (op0, 1));
2708 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2710 if (mask >> count == INTVAL (trueop1)
2711 && trunc_int_for_mode (mask, mode) == mask
2712 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2713 return simplify_gen_binary (ASHIFTRT, mode,
2714 plus_constant (mode, XEXP (op0, 0),
2715 mask),
2716 XEXP (op0, 1));
2719 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2720 if (tem)
2721 return tem;
2723 tem = simplify_associative_operation (code, mode, op0, op1);
2724 if (tem)
2725 return tem;
2726 break;
2728 case XOR:
2729 if (trueop1 == CONST0_RTX (mode))
2730 return op0;
2731 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2732 return simplify_gen_unary (NOT, mode, op0, mode);
2733 if (rtx_equal_p (trueop0, trueop1)
2734 && ! side_effects_p (op0)
2735 && GET_MODE_CLASS (mode) != MODE_CC)
2736 return CONST0_RTX (mode);
2738 /* Canonicalize XOR of the most significant bit to PLUS. */
2739 if (CONST_SCALAR_INT_P (op1)
2740 && mode_signbit_p (mode, op1))
2741 return simplify_gen_binary (PLUS, mode, op0, op1);
2742 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2743 if (CONST_SCALAR_INT_P (op1)
2744 && GET_CODE (op0) == PLUS
2745 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2746 && mode_signbit_p (mode, XEXP (op0, 1)))
2747 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2748 simplify_gen_binary (XOR, mode, op1,
2749 XEXP (op0, 1)));
2751 /* If we are XORing two things that have no bits in common,
2752 convert them into an IOR. This helps to detect rotation encoded
2753 using those methods and possibly other simplifications. */
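/* E.g. (xor (and x (const_int 0x0f)) (and y (const_int 0xf0))): the two
operands have no nonzero bits in common, so this becomes
(ior (and x (const_int 0x0f)) (and y (const_int 0xf0))). */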
2755 if (HWI_COMPUTABLE_MODE_P (mode)
2756 && (nonzero_bits (op0, mode)
2757 & nonzero_bits (op1, mode)) == 0)
2758 return (simplify_gen_binary (IOR, mode, op0, op1));
2760 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2761 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2762 (NOT y). */
2764 int num_negated = 0;
2766 if (GET_CODE (op0) == NOT)
2767 num_negated++, op0 = XEXP (op0, 0);
2768 if (GET_CODE (op1) == NOT)
2769 num_negated++, op1 = XEXP (op1, 0);
2771 if (num_negated == 2)
2772 return simplify_gen_binary (XOR, mode, op0, op1);
2773 else if (num_negated == 1)
2774 return simplify_gen_unary (NOT, mode,
2775 simplify_gen_binary (XOR, mode, op0, op1),
2776 mode);
2779 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2780 correspond to a machine insn or result in further simplifications
2781 if B is a constant. */
2783 if (GET_CODE (op0) == AND
2784 && rtx_equal_p (XEXP (op0, 1), op1)
2785 && ! side_effects_p (op1))
2786 return simplify_gen_binary (AND, mode,
2787 simplify_gen_unary (NOT, mode,
2788 XEXP (op0, 0), mode),
2789 op1);
2791 else if (GET_CODE (op0) == AND
2792 && rtx_equal_p (XEXP (op0, 0), op1)
2793 && ! side_effects_p (op1))
2794 return simplify_gen_binary (AND, mode,
2795 simplify_gen_unary (NOT, mode,
2796 XEXP (op0, 1), mode),
2797 op1);
2799 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2800 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2801 out bits inverted twice and not set by C. Similarly, given
2802 (xor (and (xor A B) C) D), simplify without inverting C in
2803 the xor operand: (xor (and A C) (B&C)^D).
2804 */
2805 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2806 && GET_CODE (XEXP (op0, 0)) == XOR
2807 && CONST_INT_P (op1)
2808 && CONST_INT_P (XEXP (op0, 1))
2809 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2811 enum rtx_code op = GET_CODE (op0);
2812 rtx a = XEXP (XEXP (op0, 0), 0);
2813 rtx b = XEXP (XEXP (op0, 0), 1);
2814 rtx c = XEXP (op0, 1);
2815 rtx d = op1;
2816 HOST_WIDE_INT bval = INTVAL (b);
2817 HOST_WIDE_INT cval = INTVAL (c);
2818 HOST_WIDE_INT dval = INTVAL (d);
2819 HOST_WIDE_INT xcval;
2821 if (op == IOR)
2822 xcval = ~cval;
2823 else
2824 xcval = cval;
2826 return simplify_gen_binary (XOR, mode,
2827 simplify_gen_binary (op, mode, a, c),
2828 gen_int_mode ((bval & xcval) ^ dval,
2829 mode));
2832 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2833 we can transform like this:
2834 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2835 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2836 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2837 Attempt a few simplifications when B and C are both constants. */
2838 if (GET_CODE (op0) == AND
2839 && CONST_INT_P (op1)
2840 && CONST_INT_P (XEXP (op0, 1)))
2842 rtx a = XEXP (op0, 0);
2843 rtx b = XEXP (op0, 1);
2844 rtx c = op1;
2845 HOST_WIDE_INT bval = INTVAL (b);
2846 HOST_WIDE_INT cval = INTVAL (c);
2848 /* Instead of computing ~A&C, we compute its negated value,
2849 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2850 optimize for sure. If it does not simplify, we still try
2851 to compute ~A&C below, but since that always allocates
2852 RTL, we don't try that before committing to returning a
2853 simplified expression. */
2854 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2855 GEN_INT (~cval));
2857 if ((~cval & bval) == 0)
2859 rtx na_c = NULL_RTX;
2860 if (n_na_c)
2861 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2862 else
2864 /* If ~A does not simplify, don't bother: we don't
2865 want to simplify 2 operations into 3, and if na_c
2866 were to simplify with na, n_na_c would have
2867 simplified as well. */
2868 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2869 if (na)
2870 na_c = simplify_gen_binary (AND, mode, na, c);
2873 /* Try to simplify ~A&C | ~B&C. */
2874 if (na_c != NULL_RTX)
2875 return simplify_gen_binary (IOR, mode, na_c,
2876 gen_int_mode (~bval & cval, mode));
2878 else
2880 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2881 if (n_na_c == CONSTM1_RTX (mode))
2883 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2884 gen_int_mode (~cval & bval,
2885 mode));
2886 return simplify_gen_binary (IOR, mode, a_nc_b,
2887 gen_int_mode (~bval & cval,
2888 mode));
2893 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2894 comparison if STORE_FLAG_VALUE is 1. */
2895 if (STORE_FLAG_VALUE == 1
2896 && trueop1 == const1_rtx
2897 && COMPARISON_P (op0)
2898 && (reversed = reversed_comparison (op0, mode)))
2899 return reversed;
2901 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2902 is (lt foo (const_int 0)), so we can perform the above
2903 simplification if STORE_FLAG_VALUE is 1. */
2905 if (STORE_FLAG_VALUE == 1
2906 && trueop1 == const1_rtx
2907 && GET_CODE (op0) == LSHIFTRT
2908 && CONST_INT_P (XEXP (op0, 1))
2909 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2910 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2912 /* (xor (comparison foo bar) (const_int sign-bit))
2913 when STORE_FLAG_VALUE is the sign bit. */
2914 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2915 && trueop1 == const_true_rtx
2916 && COMPARISON_P (op0)
2917 && (reversed = reversed_comparison (op0, mode)))
2918 return reversed;
2920 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2921 if (tem)
2922 return tem;
2924 tem = simplify_associative_operation (code, mode, op0, op1);
2925 if (tem)
2926 return tem;
2927 break;
2929 case AND:
2930 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2931 return trueop1;
2932 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2933 return op0;
2934 if (HWI_COMPUTABLE_MODE_P (mode))
2936 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2937 HOST_WIDE_INT nzop1;
2938 if (CONST_INT_P (trueop1))
2940 HOST_WIDE_INT val1 = INTVAL (trueop1);
2941 /* If we are turning off bits already known off in OP0, we need
2942 not do an AND. */
2943 if ((nzop0 & ~val1) == 0)
2944 return op0;
2946 nzop1 = nonzero_bits (trueop1, mode);
2947 /* If we are clearing all the nonzero bits, the result is zero. */
2948 if ((nzop1 & nzop0) == 0
2949 && !side_effects_p (op0) && !side_effects_p (op1))
2950 return CONST0_RTX (mode);
2952 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2953 && GET_MODE_CLASS (mode) != MODE_CC)
2954 return op0;
2955 /* A & (~A) -> 0 */
2956 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2957 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2958 && ! side_effects_p (op0)
2959 && GET_MODE_CLASS (mode) != MODE_CC)
2960 return CONST0_RTX (mode);
2962 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2963 there are no nonzero bits of C outside of X's mode. */
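/* For instance, (and:SI (sign_extend:SI (reg:QI r)) (const_int 0x7f))
becomes (zero_extend:SI (and:QI (reg:QI r) (const_int 0x7f))), since
0x7f has no bits outside QImode. */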
2964 if ((GET_CODE (op0) == SIGN_EXTEND
2965 || GET_CODE (op0) == ZERO_EXTEND)
2966 && CONST_INT_P (trueop1)
2967 && HWI_COMPUTABLE_MODE_P (mode)
2968 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2969 & UINTVAL (trueop1)) == 0)
2971 machine_mode imode = GET_MODE (XEXP (op0, 0));
2972 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2973 gen_int_mode (INTVAL (trueop1),
2974 imode));
2975 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2978 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2979 we might be able to further simplify the AND with X and potentially
2980 remove the truncation altogether. */
2981 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2983 rtx x = XEXP (op0, 0);
2984 machine_mode xmode = GET_MODE (x);
2985 tem = simplify_gen_binary (AND, xmode, x,
2986 gen_int_mode (INTVAL (trueop1), xmode));
2987 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2990 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2991 if (GET_CODE (op0) == IOR
2992 && CONST_INT_P (trueop1)
2993 && CONST_INT_P (XEXP (op0, 1)))
2995 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2996 return simplify_gen_binary (IOR, mode,
2997 simplify_gen_binary (AND, mode,
2998 XEXP (op0, 0), op1),
2999 gen_int_mode (tmp, mode));
3002 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3003 insn (and may simplify more). */
3004 if (GET_CODE (op0) == XOR
3005 && rtx_equal_p (XEXP (op0, 0), op1)
3006 && ! side_effects_p (op1))
3007 return simplify_gen_binary (AND, mode,
3008 simplify_gen_unary (NOT, mode,
3009 XEXP (op0, 1), mode),
3010 op1);
3012 if (GET_CODE (op0) == XOR
3013 && rtx_equal_p (XEXP (op0, 1), op1)
3014 && ! side_effects_p (op1))
3015 return simplify_gen_binary (AND, mode,
3016 simplify_gen_unary (NOT, mode,
3017 XEXP (op0, 0), mode),
3018 op1);
3020 /* Similarly for (~(A ^ B)) & A. */
3021 if (GET_CODE (op0) == NOT
3022 && GET_CODE (XEXP (op0, 0)) == XOR
3023 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3024 && ! side_effects_p (op1))
3025 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3027 if (GET_CODE (op0) == NOT
3028 && GET_CODE (XEXP (op0, 0)) == XOR
3029 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3030 && ! side_effects_p (op1))
3031 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3033 /* Convert (A | B) & A to A. */
3034 if (GET_CODE (op0) == IOR
3035 && (rtx_equal_p (XEXP (op0, 0), op1)
3036 || rtx_equal_p (XEXP (op0, 1), op1))
3037 && ! side_effects_p (XEXP (op0, 0))
3038 && ! side_effects_p (XEXP (op0, 1)))
3039 return op1;
3041 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3042 ((A & N) + B) & M -> (A + B) & M
3043 Similarly if (N & M) == 0,
3044 ((A | N) + B) & M -> (A + B) & M
3045 and for - instead of + and/or ^ instead of |.
3046 Also, if (N & M) == 0, then
3047 (A +- N) & M -> A & M. */
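/* Concretely, with M == 0xff and N == 0x1ff,
(and (plus (and a (const_int 0x1ff)) b) (const_int 0xff)) simplifies to
(and (plus a b) (const_int 0xff)), since the bits that N clears cannot
affect the masked result. */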
3048 if (CONST_INT_P (trueop1)
3049 && HWI_COMPUTABLE_MODE_P (mode)
3050 && ~UINTVAL (trueop1)
3051 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3052 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3054 rtx pmop[2];
3055 int which;
3057 pmop[0] = XEXP (op0, 0);
3058 pmop[1] = XEXP (op0, 1);
3060 if (CONST_INT_P (pmop[1])
3061 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3062 return simplify_gen_binary (AND, mode, pmop[0], op1);
3064 for (which = 0; which < 2; which++)
3066 tem = pmop[which];
3067 switch (GET_CODE (tem))
3069 case AND:
3070 if (CONST_INT_P (XEXP (tem, 1))
3071 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3072 == UINTVAL (trueop1))
3073 pmop[which] = XEXP (tem, 0);
3074 break;
3075 case IOR:
3076 case XOR:
3077 if (CONST_INT_P (XEXP (tem, 1))
3078 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3079 pmop[which] = XEXP (tem, 0);
3080 break;
3081 default:
3082 break;
3086 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3088 tem = simplify_gen_binary (GET_CODE (op0), mode,
3089 pmop[0], pmop[1]);
3090 return simplify_gen_binary (code, mode, tem, op1);
3094 /* (and X (ior (not X) Y)) -> (and X Y) */
3095 if (GET_CODE (op1) == IOR
3096 && GET_CODE (XEXP (op1, 0)) == NOT
3097 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3098 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3100 /* (and (ior (not X) Y) X) -> (and X Y) */
3101 if (GET_CODE (op0) == IOR
3102 && GET_CODE (XEXP (op0, 0)) == NOT
3103 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3104 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3106 /* (and X (ior Y (not X))) -> (and X Y) */
3107 if (GET_CODE (op1) == IOR
3108 && GET_CODE (XEXP (op1, 1)) == NOT
3109 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3110 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3112 /* (and (ior Y (not X)) X) -> (and X Y) */
3113 if (GET_CODE (op0) == IOR
3114 && GET_CODE (XEXP (op0, 1)) == NOT
3115 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3116 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3118 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3119 if (tem)
3120 return tem;
3122 tem = simplify_associative_operation (code, mode, op0, op1);
3123 if (tem)
3124 return tem;
3125 break;
3127 case UDIV:
3128 /* 0/x is 0 (or x&0 if x has side-effects). */
3129 if (trueop0 == CONST0_RTX (mode))
3131 if (side_effects_p (op1))
3132 return simplify_gen_binary (AND, mode, op1, trueop0);
3133 return trueop0;
3135 /* x/1 is x. */
3136 if (trueop1 == CONST1_RTX (mode))
3138 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3139 if (tem)
3140 return tem;
3142 /* Convert divide by power of two into shift. */
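/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)). */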
3143 if (CONST_INT_P (trueop1)
3144 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3145 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3146 break;
3148 case DIV:
3149 /* Handle floating point and integers separately. */
3150 if (SCALAR_FLOAT_MODE_P (mode))
3152 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3153 safe for modes with NaNs, since 0.0 / 0.0 will then be
3154 NaN rather than 0.0. Nor is it safe for modes with signed
3155 zeros, since dividing 0 by a negative number gives -0.0 */
3156 if (trueop0 == CONST0_RTX (mode)
3157 && !HONOR_NANS (mode)
3158 && !HONOR_SIGNED_ZEROS (mode)
3159 && ! side_effects_p (op1))
3160 return op0;
3161 /* x/1.0 is x. */
3162 if (trueop1 == CONST1_RTX (mode)
3163 && !HONOR_SNANS (mode))
3164 return op0;
3166 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3167 && trueop1 != CONST0_RTX (mode))
3169 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3171 /* x/-1.0 is -x. */
3172 if (real_equal (d1, &dconstm1)
3173 && !HONOR_SNANS (mode))
3174 return simplify_gen_unary (NEG, mode, op0, mode);
3176 /* Change FP division by a constant into multiplication.
3177 Only do this with -freciprocal-math. */
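/* E.g. with -freciprocal-math, (div x (const_double 4.0)) becomes
(mult x (const_double 0.25)). */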
3178 if (flag_reciprocal_math
3179 && !real_equal (d1, &dconst0))
3181 REAL_VALUE_TYPE d;
3182 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3183 tem = const_double_from_real_value (d, mode);
3184 return simplify_gen_binary (MULT, mode, op0, tem);
3188 else if (SCALAR_INT_MODE_P (mode))
3190 /* 0/x is 0 (or x&0 if x has side-effects). */
3191 if (trueop0 == CONST0_RTX (mode)
3192 && !cfun->can_throw_non_call_exceptions)
3194 if (side_effects_p (op1))
3195 return simplify_gen_binary (AND, mode, op1, trueop0);
3196 return trueop0;
3198 /* x/1 is x. */
3199 if (trueop1 == CONST1_RTX (mode))
3201 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3202 if (tem)
3203 return tem;
3205 /* x/-1 is -x. */
3206 if (trueop1 == constm1_rtx)
3208 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3209 if (x)
3210 return simplify_gen_unary (NEG, mode, x, mode);
3213 break;
3215 case UMOD:
3216 /* 0%x is 0 (or x&0 if x has side-effects). */
3217 if (trueop0 == CONST0_RTX (mode))
3219 if (side_effects_p (op1))
3220 return simplify_gen_binary (AND, mode, op1, trueop0);
3221 return trueop0;
3223 /* x%1 is 0 (or x&0 if x has side-effects). */
3224 if (trueop1 == CONST1_RTX (mode))
3226 if (side_effects_p (op0))
3227 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3228 return CONST0_RTX (mode);
3230 /* Implement modulus by power of two as AND. */
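/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)). */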
3231 if (CONST_INT_P (trueop1)
3232 && exact_log2 (UINTVAL (trueop1)) > 0)
3233 return simplify_gen_binary (AND, mode, op0,
3234 gen_int_mode (INTVAL (op1) - 1, mode));
3235 break;
3237 case MOD:
3238 /* 0%x is 0 (or x&0 if x has side-effects). */
3239 if (trueop0 == CONST0_RTX (mode))
3241 if (side_effects_p (op1))
3242 return simplify_gen_binary (AND, mode, op1, trueop0);
3243 return trueop0;
3245 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3246 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3248 if (side_effects_p (op0))
3249 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3250 return CONST0_RTX (mode);
3252 break;
3254 case ROTATERT:
3255 case ROTATE:
3256 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3257 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3258 bitsize - 1, use the other rotate direction with an amount of
3259 1 .. bitsize / 2 - 1 instead. */
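/* E.g. in SImode, (rotate x (const_int 24)) is canonicalized to
(rotatert x (const_int 8)) on targets that provide both rotate
directions. */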
3260 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3261 if (CONST_INT_P (trueop1)
3262 && IN_RANGE (INTVAL (trueop1),
3263 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3264 GET_MODE_PRECISION (mode) - 1))
3265 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3266 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3267 - INTVAL (trueop1)));
3268 #endif
3269 /* FALLTHRU */
3270 case ASHIFTRT:
3271 if (trueop1 == CONST0_RTX (mode))
3272 return op0;
3273 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3274 return op0;
3275 /* Rotating ~0 always results in ~0. */
3276 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3277 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3278 && ! side_effects_p (op1))
3279 return op0;
3280 /* Given:
3281 scalar modes M1, M2
3282 scalar constants c1, c2
3283 size (M2) > size (M1)
3284 c1 == size (M2) - size (M1)
3285 optimize:
3286 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3287 <low_part>)
3288 (const_int <c2>))
3289 to
3290 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3291 <low_part>). */
3292 if (code == ASHIFTRT
3293 && !VECTOR_MODE_P (mode)
3294 && SUBREG_P (op0)
3295 && CONST_INT_P (op1)
3296 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3297 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3298 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3299 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3300 > GET_MODE_BITSIZE (mode))
3301 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3302 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3303 - GET_MODE_BITSIZE (mode)))
3304 && subreg_lowpart_p (op0))
3306 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3307 + INTVAL (op1));
3308 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3309 tmp = simplify_gen_binary (ASHIFTRT,
3310 GET_MODE (SUBREG_REG (op0)),
3311 XEXP (SUBREG_REG (op0), 0),
3312 tmp);
3313 return lowpart_subreg (mode, tmp, inner_mode);
3315 canonicalize_shift:
3316 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3318 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3319 if (val != INTVAL (op1))
3320 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3322 break;
3324 case ASHIFT:
3325 case SS_ASHIFT:
3326 case US_ASHIFT:
3327 if (trueop1 == CONST0_RTX (mode))
3328 return op0;
3329 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3330 return op0;
3331 goto canonicalize_shift;
3333 case LSHIFTRT:
3334 if (trueop1 == CONST0_RTX (mode))
3335 return op0;
3336 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3337 return op0;
3338 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
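/* For instance, in a 32-bit mode on a target where CLZ of zero is defined
as the mode precision (32), (lshiftrt (clz x) (const_int 5)) is 1 exactly
when x is zero, so it becomes (eq x (const_int 0)). */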
3339 if (GET_CODE (op0) == CLZ
3340 && CONST_INT_P (trueop1)
3341 && STORE_FLAG_VALUE == 1
3342 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3344 machine_mode imode = GET_MODE (XEXP (op0, 0));
3345 unsigned HOST_WIDE_INT zero_val = 0;
3347 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3348 && zero_val == GET_MODE_PRECISION (imode)
3349 && INTVAL (trueop1) == exact_log2 (zero_val))
3350 return simplify_gen_relational (EQ, mode, imode,
3351 XEXP (op0, 0), const0_rtx);
3353 goto canonicalize_shift;
3355 case SMIN:
3356 if (width <= HOST_BITS_PER_WIDE_INT
3357 && mode_signbit_p (mode, trueop1)
3358 && ! side_effects_p (op0))
3359 return op1;
3360 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3361 return op0;
3362 tem = simplify_associative_operation (code, mode, op0, op1);
3363 if (tem)
3364 return tem;
3365 break;
3367 case SMAX:
3368 if (width <= HOST_BITS_PER_WIDE_INT
3369 && CONST_INT_P (trueop1)
3370 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3371 && ! side_effects_p (op0))
3372 return op1;
3373 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3374 return op0;
3375 tem = simplify_associative_operation (code, mode, op0, op1);
3376 if (tem)
3377 return tem;
3378 break;
3380 case UMIN:
3381 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3382 return op1;
3383 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3384 return op0;
3385 tem = simplify_associative_operation (code, mode, op0, op1);
3386 if (tem)
3387 return tem;
3388 break;
3390 case UMAX:
3391 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3392 return op1;
3393 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3394 return op0;
3395 tem = simplify_associative_operation (code, mode, op0, op1);
3396 if (tem)
3397 return tem;
3398 break;
3400 case SS_PLUS:
3401 case US_PLUS:
3402 case SS_MINUS:
3403 case US_MINUS:
3404 case SS_MULT:
3405 case US_MULT:
3406 case SS_DIV:
3407 case US_DIV:
3408 /* ??? There are simplifications that can be done. */
3409 return 0;
3411 case VEC_SELECT:
3412 if (!VECTOR_MODE_P (mode))
3414 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3415 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3416 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3417 gcc_assert (XVECLEN (trueop1, 0) == 1);
3418 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3420 if (GET_CODE (trueop0) == CONST_VECTOR)
3421 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3422 (trueop1, 0, 0)));
3424 /* Extract a scalar element from a nested VEC_SELECT expression
3425 (with optional nested VEC_CONCAT expression). Some targets
3426 (i386) extract a scalar element from a vector using a chain of
3427 nested VEC_SELECT expressions. When the input operand is a memory
3428 operand, this operation can be simplified to a simple scalar
3429 load from an appropriately offset memory address. */
3430 if (GET_CODE (trueop0) == VEC_SELECT)
3432 rtx op0 = XEXP (trueop0, 0);
3433 rtx op1 = XEXP (trueop0, 1);
3435 machine_mode opmode = GET_MODE (op0);
3436 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3437 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3439 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3440 int elem;
3442 rtvec vec;
3443 rtx tmp_op, tmp;
3445 gcc_assert (GET_CODE (op1) == PARALLEL);
3446 gcc_assert (i < n_elts);
3448 /* Select the element pointed to by the nested selector. */
3449 elem = INTVAL (XVECEXP (op1, 0, i));
3451 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3452 if (GET_CODE (op0) == VEC_CONCAT)
3454 rtx op00 = XEXP (op0, 0);
3455 rtx op01 = XEXP (op0, 1);
3457 machine_mode mode00, mode01;
3458 int n_elts00, n_elts01;
3460 mode00 = GET_MODE (op00);
3461 mode01 = GET_MODE (op01);
3463 /* Find out the number of elements of each operand. */
3464 if (VECTOR_MODE_P (mode00))
3466 elt_size = GET_MODE_UNIT_SIZE (mode00);
3467 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3469 else
3470 n_elts00 = 1;
3472 if (VECTOR_MODE_P (mode01))
3474 elt_size = GET_MODE_UNIT_SIZE (mode01);
3475 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3477 else
3478 n_elts01 = 1;
3480 gcc_assert (n_elts == n_elts00 + n_elts01);
3482 /* Select correct operand of VEC_CONCAT
3483 and adjust selector. */
3484 if (elem < n_elts01)
3485 tmp_op = op00;
3486 else
3488 tmp_op = op01;
3489 elem -= n_elts00;
3492 else
3493 tmp_op = op0;
3495 vec = rtvec_alloc (1);
3496 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3498 tmp = gen_rtx_fmt_ee (code, mode,
3499 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3500 return tmp;
3502 if (GET_CODE (trueop0) == VEC_DUPLICATE
3503 && GET_MODE (XEXP (trueop0, 0)) == mode)
3504 return XEXP (trueop0, 0);
3506 else
3508 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3509 gcc_assert (GET_MODE_INNER (mode)
3510 == GET_MODE_INNER (GET_MODE (trueop0)));
3511 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3513 if (GET_CODE (trueop0) == CONST_VECTOR)
3515 int elt_size = GET_MODE_UNIT_SIZE (mode);
3516 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3517 rtvec v = rtvec_alloc (n_elts);
3518 unsigned int i;
3520 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3521 for (i = 0; i < n_elts; i++)
3523 rtx x = XVECEXP (trueop1, 0, i);
3525 gcc_assert (CONST_INT_P (x));
3526 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3527 INTVAL (x));
3530 return gen_rtx_CONST_VECTOR (mode, v);
3533 /* Recognize the identity. */
3534 if (GET_MODE (trueop0) == mode)
3536 bool maybe_ident = true;
3537 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3539 rtx j = XVECEXP (trueop1, 0, i);
3540 if (!CONST_INT_P (j) || INTVAL (j) != i)
3542 maybe_ident = false;
3543 break;
3546 if (maybe_ident)
3547 return trueop0;
3550 /* If we build {a,b} then permute it, build the result directly. */
3551 if (XVECLEN (trueop1, 0) == 2
3552 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3553 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3554 && GET_CODE (trueop0) == VEC_CONCAT
3555 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3556 && GET_MODE (XEXP (trueop0, 0)) == mode
3557 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3558 && GET_MODE (XEXP (trueop0, 1)) == mode)
3560 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3561 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3562 rtx subop0, subop1;
3564 gcc_assert (i0 < 4 && i1 < 4);
3565 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3566 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3568 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3571 if (XVECLEN (trueop1, 0) == 2
3572 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3573 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3574 && GET_CODE (trueop0) == VEC_CONCAT
3575 && GET_MODE (trueop0) == mode)
3577 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3578 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3579 rtx subop0, subop1;
3581 gcc_assert (i0 < 2 && i1 < 2);
3582 subop0 = XEXP (trueop0, i0);
3583 subop1 = XEXP (trueop0, i1);
3585 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3588 /* If we select one half of a vec_concat, return that. */
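/* E.g. (vec_select:V2SI (vec_concat:V4SI a b) (parallel [0 1])), with a
and b both of mode V2SI, returns a, and selecting (parallel [2 3])
returns b. */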
3589 if (GET_CODE (trueop0) == VEC_CONCAT
3590 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3592 rtx subop0 = XEXP (trueop0, 0);
3593 rtx subop1 = XEXP (trueop0, 1);
3594 machine_mode mode0 = GET_MODE (subop0);
3595 machine_mode mode1 = GET_MODE (subop1);
3596 int li = GET_MODE_UNIT_SIZE (mode0);
3597 int l0 = GET_MODE_SIZE (mode0) / li;
3598 int l1 = GET_MODE_SIZE (mode1) / li;
3599 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3600 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3602 bool success = true;
3603 for (int i = 1; i < l0; ++i)
3605 rtx j = XVECEXP (trueop1, 0, i);
3606 if (!CONST_INT_P (j) || INTVAL (j) != i)
3608 success = false;
3609 break;
3612 if (success)
3613 return subop0;
3615 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3617 bool success = true;
3618 for (int i = 1; i < l1; ++i)
3620 rtx j = XVECEXP (trueop1, 0, i);
3621 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3623 success = false;
3624 break;
3627 if (success)
3628 return subop1;
3633 if (XVECLEN (trueop1, 0) == 1
3634 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3635 && GET_CODE (trueop0) == VEC_CONCAT)
3637 rtx vec = trueop0;
3638 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3640 /* Try to find the element in the VEC_CONCAT. */
3641 while (GET_MODE (vec) != mode
3642 && GET_CODE (vec) == VEC_CONCAT)
3644 HOST_WIDE_INT vec_size;
3646 if (CONST_INT_P (XEXP (vec, 0)))
3648 /* vec_concat of two const_ints doesn't make sense with
3649 respect to modes. */
3650 if (CONST_INT_P (XEXP (vec, 1)))
3651 return 0;
3653 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3654 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3656 else
3657 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3659 if (offset < vec_size)
3660 vec = XEXP (vec, 0);
3661 else
3663 offset -= vec_size;
3664 vec = XEXP (vec, 1);
3666 vec = avoid_constant_pool_reference (vec);
3669 if (GET_MODE (vec) == mode)
3670 return vec;
3673 /* If we select elements in a vec_merge that all come from the same
3674 operand, select from that operand directly. */
3675 if (GET_CODE (op0) == VEC_MERGE)
3677 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3678 if (CONST_INT_P (trueop02))
3680 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3681 bool all_operand0 = true;
3682 bool all_operand1 = true;
3683 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3685 rtx j = XVECEXP (trueop1, 0, i);
3686 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3687 all_operand1 = false;
3688 else
3689 all_operand0 = false;
3691 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3692 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3693 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3694 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3698 /* If we have two nested selects that are inverses of each
3699 other, replace them with the source operand. */
3700 if (GET_CODE (trueop0) == VEC_SELECT
3701 && GET_MODE (XEXP (trueop0, 0)) == mode)
3703 rtx op0_subop1 = XEXP (trueop0, 1);
3704 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3705 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3707 /* Apply the outer ordering vector to the inner one. (The inner
3708 ordering vector is expressly permitted to be of a different
3709 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3710 then the two VEC_SELECTs cancel. */
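/* A hedged illustrative example of this cancellation (the register X and
   V4SImode are arbitrary choices, not taken from this file): reversing a
   V4SI vector twice is the identity, so

     (vec_select:V4SI (vec_select:V4SI X (parallel [3 2 1 0]))
                      (parallel [3 2 1 0]))

   composes to the selector { 0, 1, 2, 3 } and simplifies back to X.  */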
3711 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3713 rtx x = XVECEXP (trueop1, 0, i);
3714 if (!CONST_INT_P (x))
3715 return 0;
3716 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3717 if (!CONST_INT_P (y) || i != INTVAL (y))
3718 return 0;
3720 return XEXP (trueop0, 0);
3723 return 0;
3724 case VEC_CONCAT:
3726 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3727 ? GET_MODE (trueop0)
3728 : GET_MODE_INNER (mode));
3729 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3730 ? GET_MODE (trueop1)
3731 : GET_MODE_INNER (mode));
3733 gcc_assert (VECTOR_MODE_P (mode));
3734 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3735 == GET_MODE_SIZE (mode));
3737 if (VECTOR_MODE_P (op0_mode))
3738 gcc_assert (GET_MODE_INNER (mode)
3739 == GET_MODE_INNER (op0_mode));
3740 else
3741 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3743 if (VECTOR_MODE_P (op1_mode))
3744 gcc_assert (GET_MODE_INNER (mode)
3745 == GET_MODE_INNER (op1_mode));
3746 else
3747 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3749 if ((GET_CODE (trueop0) == CONST_VECTOR
3750 || CONST_SCALAR_INT_P (trueop0)
3751 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3752 && (GET_CODE (trueop1) == CONST_VECTOR
3753 || CONST_SCALAR_INT_P (trueop1)
3754 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3756 int elt_size = GET_MODE_UNIT_SIZE (mode);
3757 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3758 rtvec v = rtvec_alloc (n_elts);
3759 unsigned int i;
3760 unsigned in_n_elts = 1;
3762 if (VECTOR_MODE_P (op0_mode))
3763 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3764 for (i = 0; i < n_elts; i++)
3766 if (i < in_n_elts)
3768 if (!VECTOR_MODE_P (op0_mode))
3769 RTVEC_ELT (v, i) = trueop0;
3770 else
3771 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3773 else
3775 if (!VECTOR_MODE_P (op1_mode))
3776 RTVEC_ELT (v, i) = trueop1;
3777 else
3778 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3779 i - in_n_elts);
3783 return gen_rtx_CONST_VECTOR (mode, v);
3786 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3787 Restrict the transformation to avoid generating a VEC_SELECT with a
3788 mode unrelated to its operand. */
3789 if (GET_CODE (trueop0) == VEC_SELECT
3790 && GET_CODE (trueop1) == VEC_SELECT
3791 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3792 && GET_MODE (XEXP (trueop0, 0)) == mode)
3794 rtx par0 = XEXP (trueop0, 1);
3795 rtx par1 = XEXP (trueop1, 1);
3796 int len0 = XVECLEN (par0, 0);
3797 int len1 = XVECLEN (par1, 0);
3798 rtvec vec = rtvec_alloc (len0 + len1);
3799 for (int i = 0; i < len0; i++)
3800 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3801 for (int i = 0; i < len1; i++)
3802 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3803 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3804 gen_rtx_PARALLEL (VOIDmode, vec));
3807 return 0;
3809 default:
3810 gcc_unreachable ();
3813 return 0;
3817 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3818 rtx op0, rtx op1)
3820 unsigned int width = GET_MODE_PRECISION (mode);
3822 if (VECTOR_MODE_P (mode)
3823 && code != VEC_CONCAT
3824 && GET_CODE (op0) == CONST_VECTOR
3825 && GET_CODE (op1) == CONST_VECTOR)
3827 unsigned n_elts = GET_MODE_NUNITS (mode);
3828 machine_mode op0mode = GET_MODE (op0);
3829 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3830 machine_mode op1mode = GET_MODE (op1);
3831 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3832 rtvec v = rtvec_alloc (n_elts);
3833 unsigned int i;
3835 gcc_assert (op0_n_elts == n_elts);
3836 gcc_assert (op1_n_elts == n_elts);
3837 for (i = 0; i < n_elts; i++)
3839 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3840 CONST_VECTOR_ELT (op0, i),
3841 CONST_VECTOR_ELT (op1, i));
3842 if (!x)
3843 return 0;
3844 RTVEC_ELT (v, i) = x;
3847 return gen_rtx_CONST_VECTOR (mode, v);
3850 if (VECTOR_MODE_P (mode)
3851 && code == VEC_CONCAT
3852 && (CONST_SCALAR_INT_P (op0)
3853 || GET_CODE (op0) == CONST_FIXED
3854 || CONST_DOUBLE_AS_FLOAT_P (op0))
3855 && (CONST_SCALAR_INT_P (op1)
3856 || CONST_DOUBLE_AS_FLOAT_P (op1)
3857 || GET_CODE (op1) == CONST_FIXED))
3859 unsigned n_elts = GET_MODE_NUNITS (mode);
3860 rtvec v = rtvec_alloc (n_elts);
3862 gcc_assert (n_elts >= 2);
3863 if (n_elts == 2)
3865 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3866 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3868 RTVEC_ELT (v, 0) = op0;
3869 RTVEC_ELT (v, 1) = op1;
3871 else
3873 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3874 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3875 unsigned i;
3877 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3878 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3879 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3881 for (i = 0; i < op0_n_elts; ++i)
3882 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3883 for (i = 0; i < op1_n_elts; ++i)
3884 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3887 return gen_rtx_CONST_VECTOR (mode, v);
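/* Illustrative folding (the constants are arbitrary): concatenating two
   SImode integers into a two-element vector,

     (vec_concat:V2SI (const_int 1) (const_int 2))

   takes the n_elts == 2 path above and yields
   (const_vector:V2SI [(const_int 1) (const_int 2)]).  */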
3890 if (SCALAR_FLOAT_MODE_P (mode)
3891 && CONST_DOUBLE_AS_FLOAT_P (op0)
3892 && CONST_DOUBLE_AS_FLOAT_P (op1)
3893 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3895 if (code == AND
3896 || code == IOR
3897 || code == XOR)
3899 long tmp0[4];
3900 long tmp1[4];
3901 REAL_VALUE_TYPE r;
3902 int i;
3904 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3905 GET_MODE (op0));
3906 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3907 GET_MODE (op1));
3908 for (i = 0; i < 4; i++)
3910 switch (code)
3912 case AND:
3913 tmp0[i] &= tmp1[i];
3914 break;
3915 case IOR:
3916 tmp0[i] |= tmp1[i];
3917 break;
3918 case XOR:
3919 tmp0[i] ^= tmp1[i];
3920 break;
3921 default:
3922 gcc_unreachable ();
3925 real_from_target (&r, tmp0, mode);
3926 return const_double_from_real_value (r, mode);
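/* A worked example of the bit-level folding above, assuming an IEEE
   single-precision target encoding (an assumption, not something this
   file requires): in SFmode, -2.0 is 0xc0000000 and 2.0 is 0x40000000,
   so (and:SF (const_double -2.0) (const_double 2.0)) ANDs the images,
   clears the sign bit, and folds back through real_from_target to 2.0.  */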
3928 else
3930 REAL_VALUE_TYPE f0, f1, value, result;
3931 const REAL_VALUE_TYPE *opr0, *opr1;
3932 bool inexact;
3934 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3935 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3937 if (HONOR_SNANS (mode)
3938 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3939 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3940 return 0;
3942 real_convert (&f0, mode, opr0);
3943 real_convert (&f1, mode, opr1);
3945 if (code == DIV
3946 && real_equal (&f1, &dconst0)
3947 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3948 return 0;
3950 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3951 && flag_trapping_math
3952 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3954 int s0 = REAL_VALUE_NEGATIVE (f0);
3955 int s1 = REAL_VALUE_NEGATIVE (f1);
3957 switch (code)
3959 case PLUS:
3960 /* Inf + -Inf = NaN plus exception. */
3961 if (s0 != s1)
3962 return 0;
3963 break;
3964 case MINUS:
3965 /* Inf - Inf = NaN plus exception. */
3966 if (s0 == s1)
3967 return 0;
3968 break;
3969 case DIV:
3970 /* Inf / Inf = NaN plus exception. */
3971 return 0;
3972 default:
3973 break;
3977 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3978 && flag_trapping_math
3979 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3980 || (REAL_VALUE_ISINF (f1)
3981 && real_equal (&f0, &dconst0))))
3982 /* Inf * 0 = NaN plus exception. */
3983 return 0;
3985 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3986 &f0, &f1);
3987 real_convert (&result, mode, &value);
3989 /* Don't constant fold this floating point operation if
3990 the result has overflowed and flag_trapping_math is set. */
3992 if (flag_trapping_math
3993 && MODE_HAS_INFINITIES (mode)
3994 && REAL_VALUE_ISINF (result)
3995 && !REAL_VALUE_ISINF (f0)
3996 && !REAL_VALUE_ISINF (f1))
3997 /* Overflow plus exception. */
3998 return 0;
4000 /* Don't constant fold this floating point operation if the
4001 result may depend upon the run-time rounding mode and
4002 flag_rounding_math is set, or if GCC's software emulation
4003 is unable to accurately represent the result. */
4005 if ((flag_rounding_math
4006 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4007 && (inexact || !real_identical (&result, &value)))
4008 return NULL_RTX;
4010 return const_double_from_real_value (result, mode);
4014 /* We can fold some multi-word operations. */
4015 if ((GET_MODE_CLASS (mode) == MODE_INT
4016 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4017 && CONST_SCALAR_INT_P (op0)
4018 && CONST_SCALAR_INT_P (op1))
4020 wide_int result;
4021 bool overflow;
4022 rtx_mode_t pop0 = std::make_pair (op0, mode);
4023 rtx_mode_t pop1 = std::make_pair (op1, mode);
4025 #if TARGET_SUPPORTS_WIDE_INT == 0
4026 /* This assert keeps the simplification from producing a result
4027 that cannot be represented in a CONST_DOUBLE, but a lot of
4028 upstream callers expect that this function never fails to
4029 simplify something, so if you added this check to the test
4030 above, the code would die later anyway. If this assert
4031 triggers, you just need to make the port support wide int. */
4032 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4033 #endif
4034 switch (code)
4036 case MINUS:
4037 result = wi::sub (pop0, pop1);
4038 break;
4040 case PLUS:
4041 result = wi::add (pop0, pop1);
4042 break;
4044 case MULT:
4045 result = wi::mul (pop0, pop1);
4046 break;
4048 case DIV:
4049 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4050 if (overflow)
4051 return NULL_RTX;
4052 break;
4054 case MOD:
4055 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4056 if (overflow)
4057 return NULL_RTX;
4058 break;
4060 case UDIV:
4061 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4062 if (overflow)
4063 return NULL_RTX;
4064 break;
4066 case UMOD:
4067 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4068 if (overflow)
4069 return NULL_RTX;
4070 break;
4072 case AND:
4073 result = wi::bit_and (pop0, pop1);
4074 break;
4076 case IOR:
4077 result = wi::bit_or (pop0, pop1);
4078 break;
4080 case XOR:
4081 result = wi::bit_xor (pop0, pop1);
4082 break;
4084 case SMIN:
4085 result = wi::smin (pop0, pop1);
4086 break;
4088 case SMAX:
4089 result = wi::smax (pop0, pop1);
4090 break;
4092 case UMIN:
4093 result = wi::umin (pop0, pop1);
4094 break;
4096 case UMAX:
4097 result = wi::umax (pop0, pop1);
4098 break;
4100 case LSHIFTRT:
4101 case ASHIFTRT:
4102 case ASHIFT:
4104 wide_int wop1 = pop1;
4105 if (SHIFT_COUNT_TRUNCATED)
4106 wop1 = wi::umod_trunc (wop1, width);
4107 else if (wi::geu_p (wop1, width))
4108 return NULL_RTX;
4110 switch (code)
4112 case LSHIFTRT:
4113 result = wi::lrshift (pop0, wop1);
4114 break;
4116 case ASHIFTRT:
4117 result = wi::arshift (pop0, wop1);
4118 break;
4120 case ASHIFT:
4121 result = wi::lshift (pop0, wop1);
4122 break;
4124 default:
4125 gcc_unreachable ();
4127 break;
4129 case ROTATE:
4130 case ROTATERT:
4132 if (wi::neg_p (pop1))
4133 return NULL_RTX;
4135 switch (code)
4137 case ROTATE:
4138 result = wi::lrotate (pop0, pop1);
4139 break;
4141 case ROTATERT:
4142 result = wi::rrotate (pop0, pop1);
4143 break;
4145 default:
4146 gcc_unreachable ();
4148 break;
4150 default:
4151 return NULL_RTX;
4153 return immed_wide_int_const (result, mode);
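/* Illustrative example of the wide-int folding above (SImode and the
   particular constants are arbitrary): with width == 32,
   (ashift:SI (const_int 5) (const_int 2)) passes the shift-count check
   and wi::lshift gives (const_int 20); a count of 32 or more would
   instead return NULL_RTX unless SHIFT_COUNT_TRUNCATED reduces it back
   into range first.  */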
4156 return NULL_RTX;
4161 /* Return a positive integer if X should sort after Y. The value
4162 returned is 1 if and only if X and Y are both regs. */
4164 static int
4165 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4167 int result;
4169 result = (commutative_operand_precedence (y)
4170 - commutative_operand_precedence (x));
4171 if (result)
4172 return result + result;
4174 /* Group together equal REGs to do more simplification. */
4175 if (REG_P (x) && REG_P (y))
4176 return REGNO (x) > REGNO (y);
4178 return 0;
4181 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4182 operands may be another PLUS or MINUS.
4184 Rather than test for specific cases, we do this by a brute-force method
4185 and do all possible simplifications until no more changes occur. Then
4186 we rebuild the operation.
4188 May return NULL_RTX when no changes were made. */
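/* A rough worked example (the expression is arbitrary): for
   (minus (plus a (const_int 3)) (plus a (const_int 1))) the expansion
   loop below produces the terms { a, +3, -a, -1 }; the a and -a terms
   cancel in the combination loop and the constants fold, so the whole
   expression should reduce to (const_int 2).  */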
4190 static rtx
4191 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4192 rtx op1)
4194 struct simplify_plus_minus_op_data
4196 rtx op;
4197 short neg;
4198 } ops[16];
4199 rtx result, tem;
4200 int n_ops = 2;
4201 int changed, n_constants, canonicalized = 0;
4202 int i, j;
4204 memset (ops, 0, sizeof ops);
4206 /* Set up the two operands and then expand them until nothing has been
4207 changed. If we run out of room in our array, give up; this should
4208 almost never happen. */
4210 ops[0].op = op0;
4211 ops[0].neg = 0;
4212 ops[1].op = op1;
4213 ops[1].neg = (code == MINUS);
4217 changed = 0;
4218 n_constants = 0;
4220 for (i = 0; i < n_ops; i++)
4222 rtx this_op = ops[i].op;
4223 int this_neg = ops[i].neg;
4224 enum rtx_code this_code = GET_CODE (this_op);
4226 switch (this_code)
4228 case PLUS:
4229 case MINUS:
4230 if (n_ops == ARRAY_SIZE (ops))
4231 return NULL_RTX;
4233 ops[n_ops].op = XEXP (this_op, 1);
4234 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4235 n_ops++;
4237 ops[i].op = XEXP (this_op, 0);
4238 changed = 1;
4239 /* If this operand was negated then we will potentially
4240 canonicalize the expression. Similarly if we don't
4241 place the operands adjacent we're re-ordering the
4242 expression and thus might be performing a
4243 canonicalization. Ignore register re-ordering.
4244 ??? It might be better to shuffle the ops array here,
4245 but then (plus (plus (A, B), plus (C, D))) wouldn't
4246 be seen as non-canonical. */
4247 if (this_neg
4248 || (i != n_ops - 2
4249 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4250 canonicalized = 1;
4251 break;
4253 case NEG:
4254 ops[i].op = XEXP (this_op, 0);
4255 ops[i].neg = ! this_neg;
4256 changed = 1;
4257 canonicalized = 1;
4258 break;
4260 case CONST:
4261 if (n_ops != ARRAY_SIZE (ops)
4262 && GET_CODE (XEXP (this_op, 0)) == PLUS
4263 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4264 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4266 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4267 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4268 ops[n_ops].neg = this_neg;
4269 n_ops++;
4270 changed = 1;
4271 canonicalized = 1;
4273 break;
4275 case NOT:
4276 /* ~a -> (-a - 1) */
4277 if (n_ops != ARRAY_SIZE (ops))
4279 ops[n_ops].op = CONSTM1_RTX (mode);
4280 ops[n_ops++].neg = this_neg;
4281 ops[i].op = XEXP (this_op, 0);
4282 ops[i].neg = !this_neg;
4283 changed = 1;
4284 canonicalized = 1;
4286 break;
4288 case CONST_INT:
4289 n_constants++;
4290 if (this_neg)
4292 ops[i].op = neg_const_int (mode, this_op);
4293 ops[i].neg = 0;
4294 changed = 1;
4295 canonicalized = 1;
4297 break;
4299 default:
4300 break;
4304 while (changed);
4306 if (n_constants > 1)
4307 canonicalized = 1;
4309 gcc_assert (n_ops >= 2);
4311 /* If we only have two operands, we can avoid the loops. */
4312 if (n_ops == 2)
4314 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4315 rtx lhs, rhs;
4317 /* Get the two operands. Be careful with the order, especially for
4318 the cases where code == MINUS. */
4319 if (ops[0].neg && ops[1].neg)
4321 lhs = gen_rtx_NEG (mode, ops[0].op);
4322 rhs = ops[1].op;
4324 else if (ops[0].neg)
4326 lhs = ops[1].op;
4327 rhs = ops[0].op;
4329 else
4331 lhs = ops[0].op;
4332 rhs = ops[1].op;
4335 return simplify_const_binary_operation (code, mode, lhs, rhs);
4338 /* Now simplify each pair of operands until nothing changes. */
4339 while (1)
4341 /* Insertion sort is good enough for a small array. */
4342 for (i = 1; i < n_ops; i++)
4344 struct simplify_plus_minus_op_data save;
4345 int cmp;
4347 j = i - 1;
4348 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4349 if (cmp <= 0)
4350 continue;
4351 /* Just swapping registers doesn't count as canonicalization. */
4352 if (cmp != 1)
4353 canonicalized = 1;
4355 save = ops[i];
4357 ops[j + 1] = ops[j];
4358 while (j--
4359 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4360 ops[j + 1] = save;
4363 changed = 0;
4364 for (i = n_ops - 1; i > 0; i--)
4365 for (j = i - 1; j >= 0; j--)
4367 rtx lhs = ops[j].op, rhs = ops[i].op;
4368 int lneg = ops[j].neg, rneg = ops[i].neg;
4370 if (lhs != 0 && rhs != 0)
4372 enum rtx_code ncode = PLUS;
4374 if (lneg != rneg)
4376 ncode = MINUS;
4377 if (lneg)
4378 std::swap (lhs, rhs);
4380 else if (swap_commutative_operands_p (lhs, rhs))
4381 std::swap (lhs, rhs);
4383 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4384 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4386 rtx tem_lhs, tem_rhs;
4388 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4389 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4390 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4391 tem_rhs);
4393 if (tem && !CONSTANT_P (tem))
4394 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4396 else
4397 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4399 if (tem)
4401 /* Reject "simplifications" that just wrap the two
4402 arguments in a CONST. Failure to do so can result
4403 in infinite recursion with simplify_binary_operation
4404 when it calls us to simplify CONST operations.
4405 Also, if we find such a simplification, don't try
4406 any more combinations with this rhs: We must have
4407 something like symbol+offset, i.e. one of the
4408 trivial CONST expressions we handle later. */
4409 if (GET_CODE (tem) == CONST
4410 && GET_CODE (XEXP (tem, 0)) == ncode
4411 && XEXP (XEXP (tem, 0), 0) == lhs
4412 && XEXP (XEXP (tem, 0), 1) == rhs)
4413 break;
4414 lneg &= rneg;
4415 if (GET_CODE (tem) == NEG)
4416 tem = XEXP (tem, 0), lneg = !lneg;
4417 if (CONST_INT_P (tem) && lneg)
4418 tem = neg_const_int (mode, tem), lneg = 0;
4420 ops[i].op = tem;
4421 ops[i].neg = lneg;
4422 ops[j].op = NULL_RTX;
4423 changed = 1;
4424 canonicalized = 1;
4429 if (!changed)
4430 break;
4432 /* Pack all the operands to the lower-numbered entries. */
4433 for (i = 0, j = 0; j < n_ops; j++)
4434 if (ops[j].op)
4436 ops[i] = ops[j];
4437 i++;
4439 n_ops = i;
4442 /* If nothing changed, check that rematerialization of rtl instructions
4443 is still required. */
4444 if (!canonicalized)
4446 /* Perform rematerialization only if all operands are registers and
4447 all operations are PLUS. */
4448 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4449 around rs6000 and how it uses the CA register. See PR67145. */
4450 for (i = 0; i < n_ops; i++)
4451 if (ops[i].neg
4452 || !REG_P (ops[i].op)
4453 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4454 && fixed_regs[REGNO (ops[i].op)]
4455 && !global_regs[REGNO (ops[i].op)]
4456 && ops[i].op != frame_pointer_rtx
4457 && ops[i].op != arg_pointer_rtx
4458 && ops[i].op != stack_pointer_rtx))
4459 return NULL_RTX;
4460 goto gen_result;
4463 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4464 if (n_ops == 2
4465 && CONST_INT_P (ops[1].op)
4466 && CONSTANT_P (ops[0].op)
4467 && ops[0].neg)
4468 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4470 /* We suppressed creation of trivial CONST expressions in the
4471 combination loop to avoid recursion. Create one manually now.
4472 The combination loop should have ensured that there is exactly
4473 one CONST_INT, and the sort will have ensured that it is last
4474 in the array and that any other constant will be next-to-last. */
4476 if (n_ops > 1
4477 && CONST_INT_P (ops[n_ops - 1].op)
4478 && CONSTANT_P (ops[n_ops - 2].op))
4480 rtx value = ops[n_ops - 1].op;
4481 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4482 value = neg_const_int (mode, value);
4483 if (CONST_INT_P (value))
4485 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4486 INTVAL (value));
4487 n_ops--;
4491 /* Put a non-negated operand first, if possible. */
4493 for (i = 0; i < n_ops && ops[i].neg; i++)
4494 continue;
4495 if (i == n_ops)
4496 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4497 else if (i != 0)
4499 tem = ops[0].op;
4500 ops[0] = ops[i];
4501 ops[i].op = tem;
4502 ops[i].neg = 1;
4505 /* Now make the result by performing the requested operations. */
4506 gen_result:
4507 result = ops[0].op;
4508 for (i = 1; i < n_ops; i++)
4509 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4510 mode, result, ops[i].op);
4512 return result;
4515 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4516 static bool
4517 plus_minus_operand_p (const_rtx x)
4519 return GET_CODE (x) == PLUS
4520 || GET_CODE (x) == MINUS
4521 || (GET_CODE (x) == CONST
4522 && GET_CODE (XEXP (x, 0)) == PLUS
4523 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4524 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4527 /* Like simplify_binary_operation except used for relational operators.
4528 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4529 not both be VOIDmode.
4531 CMP_MODE specifies the mode in which the comparison is done, so it is
4532 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4533 the operands or, if both are VOIDmode, the operands are compared in
4534 "infinite precision". */
4536 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4537 machine_mode cmp_mode, rtx op0, rtx op1)
4539 rtx tem, trueop0, trueop1;
4541 if (cmp_mode == VOIDmode)
4542 cmp_mode = GET_MODE (op0);
4543 if (cmp_mode == VOIDmode)
4544 cmp_mode = GET_MODE (op1);
4546 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4547 if (tem)
4549 if (SCALAR_FLOAT_MODE_P (mode))
4551 if (tem == const0_rtx)
4552 return CONST0_RTX (mode);
4553 #ifdef FLOAT_STORE_FLAG_VALUE
4555 REAL_VALUE_TYPE val;
4556 val = FLOAT_STORE_FLAG_VALUE (mode);
4557 return const_double_from_real_value (val, mode);
4559 #else
4560 return NULL_RTX;
4561 #endif
4563 if (VECTOR_MODE_P (mode))
4565 if (tem == const0_rtx)
4566 return CONST0_RTX (mode);
4567 #ifdef VECTOR_STORE_FLAG_VALUE
4569 int i, units;
4570 rtvec v;
4572 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4573 if (val == NULL_RTX)
4574 return NULL_RTX;
4575 if (val == const1_rtx)
4576 return CONST1_RTX (mode);
4578 units = GET_MODE_NUNITS (mode);
4579 v = rtvec_alloc (units);
4580 for (i = 0; i < units; i++)
4581 RTVEC_ELT (v, i) = val;
4582 return gen_rtx_raw_CONST_VECTOR (mode, v);
4584 #else
4585 return NULL_RTX;
4586 #endif
4589 return tem;
4592 /* For the following tests, ensure const0_rtx is op1. */
4593 if (swap_commutative_operands_p (op0, op1)
4594 || (op0 == const0_rtx && op1 != const0_rtx))
4595 std::swap (op0, op1), code = swap_condition (code);
4597 /* If op0 is a compare, extract the comparison arguments from it. */
4598 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4599 return simplify_gen_relational (code, mode, VOIDmode,
4600 XEXP (op0, 0), XEXP (op0, 1));
4602 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4603 || CC0_P (op0))
4604 return NULL_RTX;
4606 trueop0 = avoid_constant_pool_reference (op0);
4607 trueop1 = avoid_constant_pool_reference (op1);
4608 return simplify_relational_operation_1 (code, mode, cmp_mode,
4609 trueop0, trueop1);
4612 /* This part of simplify_relational_operation is only used when CMP_MODE
4613 is not in class MODE_CC (i.e. it is a real comparison).
4615 MODE is the mode of the result, while CMP_MODE specifies the mode
4616 in which the comparison is done, so it is the mode of the operands. */
4618 static rtx
4619 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4620 machine_mode cmp_mode, rtx op0, rtx op1)
4622 enum rtx_code op0code = GET_CODE (op0);
4624 if (op1 == const0_rtx && COMPARISON_P (op0))
4626 /* If op0 is a comparison, extract the comparison arguments
4627 from it. */
4628 if (code == NE)
4630 if (GET_MODE (op0) == mode)
4631 return simplify_rtx (op0);
4632 else
4633 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4634 XEXP (op0, 0), XEXP (op0, 1));
4636 else if (code == EQ)
4638 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4639 if (new_code != UNKNOWN)
4640 return simplify_gen_relational (new_code, mode, VOIDmode,
4641 XEXP (op0, 0), XEXP (op0, 1));
4645 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4646 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4647 if ((code == LTU || code == GEU)
4648 && GET_CODE (op0) == PLUS
4649 && CONST_INT_P (XEXP (op0, 1))
4650 && (rtx_equal_p (op1, XEXP (op0, 0))
4651 || rtx_equal_p (op1, XEXP (op0, 1)))
4652 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4653 && XEXP (op0, 1) != const0_rtx)
4655 rtx new_cmp
4656 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4657 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4658 cmp_mode, XEXP (op0, 0), new_cmp);
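/* A concrete instance of the rewrite above (SImode and the constant 4
   are arbitrary): the unsigned overflow test

     (ltu (plus:SI a (const_int 4)) (const_int 4))

   becomes (geu a (const_int -4)), i.e. "a + 4 wrapped around" is
   rewritten as "a >= 0xfffffffc".  */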
4661 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4662 if ((code == LTU || code == GEU)
4663 && GET_CODE (op0) == PLUS
4664 && rtx_equal_p (op1, XEXP (op0, 1))
4665 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4666 && !rtx_equal_p (op1, XEXP (op0, 0)))
4667 return simplify_gen_relational (code, mode, cmp_mode, op0,
4668 copy_rtx (XEXP (op0, 0)));
4670 if (op1 == const0_rtx)
4672 /* Canonicalize (GTU x 0) as (NE x 0). */
4673 if (code == GTU)
4674 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4675 /* Canonicalize (LEU x 0) as (EQ x 0). */
4676 if (code == LEU)
4677 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4679 else if (op1 == const1_rtx)
4681 switch (code)
4683 case GE:
4684 /* Canonicalize (GE x 1) as (GT x 0). */
4685 return simplify_gen_relational (GT, mode, cmp_mode,
4686 op0, const0_rtx);
4687 case GEU:
4688 /* Canonicalize (GEU x 1) as (NE x 0). */
4689 return simplify_gen_relational (NE, mode, cmp_mode,
4690 op0, const0_rtx);
4691 case LT:
4692 /* Canonicalize (LT x 1) as (LE x 0). */
4693 return simplify_gen_relational (LE, mode, cmp_mode,
4694 op0, const0_rtx);
4695 case LTU:
4696 /* Canonicalize (LTU x 1) as (EQ x 0). */
4697 return simplify_gen_relational (EQ, mode, cmp_mode,
4698 op0, const0_rtx);
4699 default:
4700 break;
4703 else if (op1 == constm1_rtx)
4705 /* Canonicalize (LE x -1) as (LT x 0). */
4706 if (code == LE)
4707 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4708 /* Canonicalize (GT x -1) as (GE x 0). */
4709 if (code == GT)
4710 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4713 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4714 if ((code == EQ || code == NE)
4715 && (op0code == PLUS || op0code == MINUS)
4716 && CONSTANT_P (op1)
4717 && CONSTANT_P (XEXP (op0, 1))
4718 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4720 rtx x = XEXP (op0, 0);
4721 rtx c = XEXP (op0, 1);
4722 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4723 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4725 /* Detect an infinite recursive condition, where we oscillate at this
4726 simplification case between:
4727 A + B == C <---> C - B == A,
4728 where A, B, and C are all constants with non-simplifiable expressions,
4729 usually SYMBOL_REFs. */
4730 if (GET_CODE (tem) == invcode
4731 && CONSTANT_P (x)
4732 && rtx_equal_p (c, XEXP (tem, 1)))
4733 return NULL_RTX;
4735 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4738 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4739 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4740 if (code == NE
4741 && op1 == const0_rtx
4742 && GET_MODE_CLASS (mode) == MODE_INT
4743 && cmp_mode != VOIDmode
4744 /* ??? Work-around BImode bugs in the ia64 backend. */
4745 && mode != BImode
4746 && cmp_mode != BImode
4747 && nonzero_bits (op0, cmp_mode) == 1
4748 && STORE_FLAG_VALUE == 1)
4749 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4750 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4751 : lowpart_subreg (mode, op0, cmp_mode);
4753 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4754 if ((code == EQ || code == NE)
4755 && op1 == const0_rtx
4756 && op0code == XOR)
4757 return simplify_gen_relational (code, mode, cmp_mode,
4758 XEXP (op0, 0), XEXP (op0, 1));
4760 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4761 if ((code == EQ || code == NE)
4762 && op0code == XOR
4763 && rtx_equal_p (XEXP (op0, 0), op1)
4764 && !side_effects_p (XEXP (op0, 0)))
4765 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4766 CONST0_RTX (mode));
4768 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4769 if ((code == EQ || code == NE)
4770 && op0code == XOR
4771 && rtx_equal_p (XEXP (op0, 1), op1)
4772 && !side_effects_p (XEXP (op0, 1)))
4773 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4774 CONST0_RTX (mode));
4776 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4777 if ((code == EQ || code == NE)
4778 && op0code == XOR
4779 && CONST_SCALAR_INT_P (op1)
4780 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4781 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4782 simplify_gen_binary (XOR, cmp_mode,
4783 XEXP (op0, 1), op1));
4785 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4786 can be implemented with a BICS instruction on some targets, or
4787 constant-folded if y is a constant. */
4788 if ((code == EQ || code == NE)
4789 && op0code == AND
4790 && rtx_equal_p (XEXP (op0, 0), op1)
4791 && !side_effects_p (op1)
4792 && op1 != CONST0_RTX (cmp_mode))
4794 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4795 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4797 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4798 CONST0_RTX (cmp_mode));
4801 /* Likewise for (eq/ne (and x y) y). */
4802 if ((code == EQ || code == NE)
4803 && op0code == AND
4804 && rtx_equal_p (XEXP (op0, 1), op1)
4805 && !side_effects_p (op1)
4806 && op1 != CONST0_RTX (cmp_mode))
4808 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4809 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4811 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4812 CONST0_RTX (cmp_mode));
4815 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4816 if ((code == EQ || code == NE)
4817 && GET_CODE (op0) == BSWAP
4818 && CONST_SCALAR_INT_P (op1))
4819 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4820 simplify_gen_unary (BSWAP, cmp_mode,
4821 op1, cmp_mode));
4823 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4824 if ((code == EQ || code == NE)
4825 && GET_CODE (op0) == BSWAP
4826 && GET_CODE (op1) == BSWAP)
4827 return simplify_gen_relational (code, mode, cmp_mode,
4828 XEXP (op0, 0), XEXP (op1, 0));
4830 if (op0code == POPCOUNT && op1 == const0_rtx)
4831 switch (code)
4833 case EQ:
4834 case LE:
4835 case LEU:
4836 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4837 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4838 XEXP (op0, 0), const0_rtx);
4840 case NE:
4841 case GT:
4842 case GTU:
4843 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4844 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4845 XEXP (op0, 0), const0_rtx);
4847 default:
4848 break;
4851 return NULL_RTX;
4854 enum
4856 CMP_EQ = 1,
4857 CMP_LT = 2,
4858 CMP_GT = 4,
4859 CMP_LTU = 8,
4860 CMP_GTU = 16
4864 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4865 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4866 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4867 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4868 For floating-point comparisons, assume that the operands were ordered. */
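/* Illustrative use of KNOWN_RESULT (the concrete values are arbitrary):
   comparing the constants 3 and 5 gives CMP_LT | CMP_LTU, so
   comparison_result (GE, ...) yields const0_rtx while
   comparison_result (LTU, ...) yields const_true_rtx.  */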
4870 static rtx
4871 comparison_result (enum rtx_code code, int known_results)
4873 switch (code)
4875 case EQ:
4876 case UNEQ:
4877 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4878 case NE:
4879 case LTGT:
4880 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4882 case LT:
4883 case UNLT:
4884 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4885 case GE:
4886 case UNGE:
4887 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4889 case GT:
4890 case UNGT:
4891 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4892 case LE:
4893 case UNLE:
4894 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4896 case LTU:
4897 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4898 case GEU:
4899 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4901 case GTU:
4902 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4903 case LEU:
4904 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4906 case ORDERED:
4907 return const_true_rtx;
4908 case UNORDERED:
4909 return const0_rtx;
4910 default:
4911 gcc_unreachable ();
4915 /* Check if the given comparison (done in the given MODE) is actually
4916 a tautology or a contradiction. If the mode is VOIDmode, the
4917 comparison is done in "infinite precision". If no simplification
4918 is possible, this function returns zero. Otherwise, it returns
4919 either const_true_rtx or const0_rtx. */
4922 simplify_const_relational_operation (enum rtx_code code,
4923 machine_mode mode,
4924 rtx op0, rtx op1)
4926 rtx tem;
4927 rtx trueop0;
4928 rtx trueop1;
4930 gcc_assert (mode != VOIDmode
4931 || (GET_MODE (op0) == VOIDmode
4932 && GET_MODE (op1) == VOIDmode));
4934 /* If op0 is a compare, extract the comparison arguments from it. */
4935 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4937 op1 = XEXP (op0, 1);
4938 op0 = XEXP (op0, 0);
4940 if (GET_MODE (op0) != VOIDmode)
4941 mode = GET_MODE (op0);
4942 else if (GET_MODE (op1) != VOIDmode)
4943 mode = GET_MODE (op1);
4944 else
4945 return 0;
4948 /* We can't simplify MODE_CC values since we don't know what the
4949 actual comparison is. */
4950 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4951 return 0;
4953 /* Make sure the constant is second. */
4954 if (swap_commutative_operands_p (op0, op1))
4956 std::swap (op0, op1);
4957 code = swap_condition (code);
4960 trueop0 = avoid_constant_pool_reference (op0);
4961 trueop1 = avoid_constant_pool_reference (op1);
4963 /* For integer comparisons of A and B maybe we can simplify A - B and can
4964 then simplify a comparison of that with zero. If A and B are both either
4965 a register or a CONST_INT, this can't help; testing for these cases will
4966 prevent infinite recursion here and speed things up.
4968 We can only do this for EQ and NE comparisons, as otherwise we may
4969 lose or introduce overflow that we cannot disregard as undefined,
4970 since we do not know the signedness of the operation on either the
4971 left or the right hand side of the comparison. */
4973 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4974 && (code == EQ || code == NE)
4975 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4976 && (REG_P (op1) || CONST_INT_P (trueop1)))
4977 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4978 /* We cannot do this if tem is a nonzero address. */
4979 && ! nonzero_address_p (tem))
4980 return simplify_const_relational_operation (signed_condition (code),
4981 mode, tem, const0_rtx);
4983 if (! HONOR_NANS (mode) && code == ORDERED)
4984 return const_true_rtx;
4986 if (! HONOR_NANS (mode) && code == UNORDERED)
4987 return const0_rtx;
4989 /* For modes without NaNs, if the two operands are equal, we know the
4990 result except if they have side-effects. Even with NaNs we know
4991 the result of unordered comparisons and, if signaling NaNs are
4992 irrelevant, also the result of LT/GT/LTGT. */
4993 if ((! HONOR_NANS (trueop0)
4994 || code == UNEQ || code == UNLE || code == UNGE
4995 || ((code == LT || code == GT || code == LTGT)
4996 && ! HONOR_SNANS (trueop0)))
4997 && rtx_equal_p (trueop0, trueop1)
4998 && ! side_effects_p (trueop0))
4999 return comparison_result (code, CMP_EQ);
5001 /* If the operands are floating-point constants, see if we can fold
5002 the result. */
5003 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5004 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5005 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5007 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5008 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5010 /* Comparisons are unordered iff at least one of the values is NaN. */
5011 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5012 switch (code)
5014 case UNEQ:
5015 case UNLT:
5016 case UNGT:
5017 case UNLE:
5018 case UNGE:
5019 case NE:
5020 case UNORDERED:
5021 return const_true_rtx;
5022 case EQ:
5023 case LT:
5024 case GT:
5025 case LE:
5026 case GE:
5027 case LTGT:
5028 case ORDERED:
5029 return const0_rtx;
5030 default:
5031 return 0;
5034 return comparison_result (code,
5035 (real_equal (d0, d1) ? CMP_EQ :
5036 real_less (d0, d1) ? CMP_LT : CMP_GT));
5039 /* Otherwise, see if the operands are both integers. */
5040 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5041 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5043 /* It would be nice if we really had a mode here. However, the
5044 largest int representable on the target is as good as
5045 infinite. */
5046 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5047 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
5048 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
5050 if (wi::eq_p (ptrueop0, ptrueop1))
5051 return comparison_result (code, CMP_EQ);
5052 else
5054 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5055 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5056 return comparison_result (code, cr);
5060 /* Optimize comparisons with upper and lower bounds. */
5061 if (HWI_COMPUTABLE_MODE_P (mode)
5062 && CONST_INT_P (trueop1)
5063 && !side_effects_p (trueop0))
5065 int sign;
5066 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5067 HOST_WIDE_INT val = INTVAL (trueop1);
5068 HOST_WIDE_INT mmin, mmax;
5070 if (code == GEU
5071 || code == LEU
5072 || code == GTU
5073 || code == LTU)
5074 sign = 0;
5075 else
5076 sign = 1;
5078 /* Get a reduced range if the sign bit is zero. */
5079 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5081 mmin = 0;
5082 mmax = nonzero;
5084 else
5086 rtx mmin_rtx, mmax_rtx;
5087 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5089 mmin = INTVAL (mmin_rtx);
5090 mmax = INTVAL (mmax_rtx);
5091 if (sign)
5093 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5095 mmin >>= (sign_copies - 1);
5096 mmax >>= (sign_copies - 1);
5100 switch (code)
5102 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5103 case GEU:
5104 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5105 return const_true_rtx;
5106 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5107 return const0_rtx;
5108 break;
5109 case GE:
5110 if (val <= mmin)
5111 return const_true_rtx;
5112 if (val > mmax)
5113 return const0_rtx;
5114 break;
5116 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5117 case LEU:
5118 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5119 return const_true_rtx;
5120 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5121 return const0_rtx;
5122 break;
5123 case LE:
5124 if (val >= mmax)
5125 return const_true_rtx;
5126 if (val < mmin)
5127 return const0_rtx;
5128 break;
5130 case EQ:
5131 /* x == y is always false for y out of range. */
5132 if (val < mmin || val > mmax)
5133 return const0_rtx;
5134 break;
5136 /* x > y is always false for y >= mmax, always true for y < mmin. */
5137 case GTU:
5138 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5139 return const0_rtx;
5140 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5141 return const_true_rtx;
5142 break;
5143 case GT:
5144 if (val >= mmax)
5145 return const0_rtx;
5146 if (val < mmin)
5147 return const_true_rtx;
5148 break;
5150 /* x < y is always false for y <= mmin, always true for y > mmax. */
5151 case LTU:
5152 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5153 return const0_rtx;
5154 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5155 return const_true_rtx;
5156 break;
5157 case LT:
5158 if (val <= mmin)
5159 return const0_rtx;
5160 if (val > mmax)
5161 return const_true_rtx;
5162 break;
5164 case NE:
5165 /* x != y is always true for y out of range. */
5166 if (val < mmin || val > mmax)
5167 return const_true_rtx;
5168 break;
5170 default:
5171 break;
5175 /* Optimize integer comparisons with zero. */
5176 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5178 /* Some addresses are known to be nonzero. We don't know
5179 their sign, but equality comparisons are known. */
5180 if (nonzero_address_p (trueop0))
5182 if (code == EQ || code == LEU)
5183 return const0_rtx;
5184 if (code == NE || code == GTU)
5185 return const_true_rtx;
5188 /* See if the first operand is an IOR with a constant. If so, we
5189 may be able to determine the result of this comparison. */
5190 if (GET_CODE (op0) == IOR)
5192 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5193 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5195 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5196 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5197 && (UINTVAL (inner_const)
5198 & ((unsigned HOST_WIDE_INT) 1
5199 << sign_bitnum)));
5201 switch (code)
5203 case EQ:
5204 case LEU:
5205 return const0_rtx;
5206 case NE:
5207 case GTU:
5208 return const_true_rtx;
5209 case LT:
5210 case LE:
5211 if (has_sign)
5212 return const_true_rtx;
5213 break;
5214 case GT:
5215 case GE:
5216 if (has_sign)
5217 return const0_rtx;
5218 break;
5219 default:
5220 break;
5226 /* Optimize comparison of ABS with zero. */
5227 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5228 && (GET_CODE (trueop0) == ABS
5229 || (GET_CODE (trueop0) == FLOAT_EXTEND
5230 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5232 switch (code)
5234 case LT:
5235 /* Optimize abs(x) < 0.0. */
5236 if (!HONOR_SNANS (mode)
5237 && (!INTEGRAL_MODE_P (mode)
5238 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5240 if (INTEGRAL_MODE_P (mode)
5241 && (issue_strict_overflow_warning
5242 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5243 warning (OPT_Wstrict_overflow,
5244 ("assuming signed overflow does not occur when "
5245 "assuming abs (x) < 0 is false"));
5246 return const0_rtx;
5248 break;
5250 case GE:
5251 /* Optimize abs(x) >= 0.0. */
5252 if (!HONOR_NANS (mode)
5253 && (!INTEGRAL_MODE_P (mode)
5254 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5256 if (INTEGRAL_MODE_P (mode)
5257 && (issue_strict_overflow_warning
5258 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5259 warning (OPT_Wstrict_overflow,
5260 ("assuming signed overflow does not occur when "
5261 "assuming abs (x) >= 0 is true"));
5262 return const_true_rtx;
5264 break;
5266 case UNGE:
5267 /* Optimize ! (abs(x) < 0.0). */
5268 return const_true_rtx;
5270 default:
5271 break;
5275 return 0;
5278 /* Simplify CODE, an operation with result mode MODE and three operands,
5279 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5280 a constant. Return 0 if no simplification is possible. */
5283 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5284 machine_mode op0_mode, rtx op0, rtx op1,
5285 rtx op2)
5287 unsigned int width = GET_MODE_PRECISION (mode);
5288 bool any_change = false;
5289 rtx tem, trueop2;
5291 /* VOIDmode means "infinite" precision. */
5292 if (width == 0)
5293 width = HOST_BITS_PER_WIDE_INT;
5295 switch (code)
5297 case FMA:
5298 /* Simplify negations around the multiplication. */
5299 /* -a * -b + c => a * b + c. */
5300 if (GET_CODE (op0) == NEG)
5302 tem = simplify_unary_operation (NEG, mode, op1, mode);
5303 if (tem)
5304 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5306 else if (GET_CODE (op1) == NEG)
5308 tem = simplify_unary_operation (NEG, mode, op0, mode);
5309 if (tem)
5310 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5313 /* Canonicalize the two multiplication operands. */
5314 /* a * -b + c => -b * a + c. */
5315 if (swap_commutative_operands_p (op0, op1))
5316 std::swap (op0, op1), any_change = true;
5318 if (any_change)
5319 return gen_rtx_FMA (mode, op0, op1, op2);
5320 return NULL_RTX;
5322 case SIGN_EXTRACT:
5323 case ZERO_EXTRACT:
5324 if (CONST_INT_P (op0)
5325 && CONST_INT_P (op1)
5326 && CONST_INT_P (op2)
5327 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5328 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5330 /* Extracting a bit-field from a constant */
5331 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5332 HOST_WIDE_INT op1val = INTVAL (op1);
5333 HOST_WIDE_INT op2val = INTVAL (op2);
5334 if (BITS_BIG_ENDIAN)
5335 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5336 else
5337 val >>= op2val;
5339 if (HOST_BITS_PER_WIDE_INT != op1val)
5341 /* First zero-extend. */
5342 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5343 /* If desired, propagate sign bit. */
5344 if (code == SIGN_EXTRACT
5345 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5346 != 0)
5347 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5350 return gen_int_mode (val, mode);
5352 break;
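/* Worked example of the extraction above, assuming BITS_BIG_ENDIAN is 0
   (the constants are arbitrary):

     (zero_extract:SI (const_int 0xabcd) (const_int 4) (const_int 4))

   shifts 0xabcd right by 4 and masks to 4 bits, giving (const_int 12).  */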
5354 case IF_THEN_ELSE:
5355 if (CONST_INT_P (op0))
5356 return op0 != const0_rtx ? op1 : op2;
5358 /* Convert c ? a : a into "a". */
5359 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5360 return op1;
5362 /* Convert a != b ? a : b into "a". */
5363 if (GET_CODE (op0) == NE
5364 && ! side_effects_p (op0)
5365 && ! HONOR_NANS (mode)
5366 && ! HONOR_SIGNED_ZEROS (mode)
5367 && ((rtx_equal_p (XEXP (op0, 0), op1)
5368 && rtx_equal_p (XEXP (op0, 1), op2))
5369 || (rtx_equal_p (XEXP (op0, 0), op2)
5370 && rtx_equal_p (XEXP (op0, 1), op1))))
5371 return op1;
5373 /* Convert a == b ? a : b into "b". */
5374 if (GET_CODE (op0) == EQ
5375 && ! side_effects_p (op0)
5376 && ! HONOR_NANS (mode)
5377 && ! HONOR_SIGNED_ZEROS (mode)
5378 && ((rtx_equal_p (XEXP (op0, 0), op1)
5379 && rtx_equal_p (XEXP (op0, 1), op2))
5380 || (rtx_equal_p (XEXP (op0, 0), op2)
5381 && rtx_equal_p (XEXP (op0, 1), op1))))
5382 return op2;
5384 /* Convert (!c) != {0,...,0} ? a : b into
5385 c != {0,...,0} ? b : a for vector modes. */
5386 if (VECTOR_MODE_P (GET_MODE (op1))
5387 && GET_CODE (op0) == NE
5388 && GET_CODE (XEXP (op0, 0)) == NOT
5389 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5391 rtx cv = XEXP (op0, 1);
5392 int nunits = CONST_VECTOR_NUNITS (cv);
5393 bool ok = true;
5394 for (int i = 0; i < nunits; ++i)
5395 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5397 ok = false;
5398 break;
5400 if (ok)
5402 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5403 XEXP (XEXP (op0, 0), 0),
5404 XEXP (op0, 1));
5405 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5406 return retval;
5410 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5412 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5413 ? GET_MODE (XEXP (op0, 1))
5414 : GET_MODE (XEXP (op0, 0)));
5415 rtx temp;
5417 /* Look for happy constants in op1 and op2. */
5418 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5420 HOST_WIDE_INT t = INTVAL (op1);
5421 HOST_WIDE_INT f = INTVAL (op2);
5423 if (t == STORE_FLAG_VALUE && f == 0)
5424 code = GET_CODE (op0);
5425 else if (t == 0 && f == STORE_FLAG_VALUE)
5427 enum rtx_code tmp;
5428 tmp = reversed_comparison_code (op0, NULL_RTX);
5429 if (tmp == UNKNOWN)
5430 break;
5431 code = tmp;
5433 else
5434 break;
5436 return simplify_gen_relational (code, mode, cmp_mode,
5437 XEXP (op0, 0), XEXP (op0, 1));
5440 if (cmp_mode == VOIDmode)
5441 cmp_mode = op0_mode;
5442 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5443 cmp_mode, XEXP (op0, 0),
5444 XEXP (op0, 1));
5446 /* See if any simplifications were possible. */
5447 if (temp)
5449 if (CONST_INT_P (temp))
5450 return temp == const0_rtx ? op2 : op1;
5451 else if (temp)
5452 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5455 break;
5457 case VEC_MERGE:
5458 gcc_assert (GET_MODE (op0) == mode);
5459 gcc_assert (GET_MODE (op1) == mode);
5460 gcc_assert (VECTOR_MODE_P (mode));
5461 trueop2 = avoid_constant_pool_reference (op2);
5462 if (CONST_INT_P (trueop2))
5464 int elt_size = GET_MODE_UNIT_SIZE (mode);
5465 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5466 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5467 unsigned HOST_WIDE_INT mask;
5468 if (n_elts == HOST_BITS_PER_WIDE_INT)
5469 mask = -1;
5470 else
5471 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5473 if (!(sel & mask) && !side_effects_p (op0))
5474 return op1;
5475 if ((sel & mask) == mask && !side_effects_p (op1))
5476 return op0;
5478 rtx trueop0 = avoid_constant_pool_reference (op0);
5479 rtx trueop1 = avoid_constant_pool_reference (op1);
5480 if (GET_CODE (trueop0) == CONST_VECTOR
5481 && GET_CODE (trueop1) == CONST_VECTOR)
5483 rtvec v = rtvec_alloc (n_elts);
5484 unsigned int i;
5486 for (i = 0; i < n_elts; i++)
5487 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5488 ? CONST_VECTOR_ELT (trueop0, i)
5489 : CONST_VECTOR_ELT (trueop1, i));
5490 return gen_rtx_CONST_VECTOR (mode, v);
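/* Illustrative folding (the V4SI operands are arbitrary): with
   trueop0 = [0 1 2 3], trueop1 = [10 11 12 13] and a selector of 5
   (binary 0101), bits 0 and 2 pick from trueop0 and the remaining
   elements come from trueop1, so the VEC_MERGE folds to
   (const_vector:V4SI [0 11 2 13]).  */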
5493 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5494 if no element from a appears in the result. */
5495 if (GET_CODE (op0) == VEC_MERGE)
5497 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5498 if (CONST_INT_P (tem))
5500 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5501 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5502 return simplify_gen_ternary (code, mode, mode,
5503 XEXP (op0, 1), op1, op2);
5504 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5505 return simplify_gen_ternary (code, mode, mode,
5506 XEXP (op0, 0), op1, op2);
5509 if (GET_CODE (op1) == VEC_MERGE)
5511 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5512 if (CONST_INT_P (tem))
5514 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5515 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5516 return simplify_gen_ternary (code, mode, mode,
5517 op0, XEXP (op1, 1), op2);
5518 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5519 return simplify_gen_ternary (code, mode, mode,
5520 op0, XEXP (op1, 0), op2);
5524 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5525 with a. */
5526 if (GET_CODE (op0) == VEC_DUPLICATE
5527 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5528 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5529 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5531 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5532 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5534 if (XEXP (XEXP (op0, 0), 0) == op1
5535 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5536 return op1;
5541 if (rtx_equal_p (op0, op1)
5542 && !side_effects_p (op2) && !side_effects_p (op1))
5543 return op0;
5545 break;
5547 default:
5548 gcc_unreachable ();
5551 return 0;
5554 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5555 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5556 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5558 Works by unpacking OP into a collection of 8-bit values
5559 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5560 and then repacking them again for OUTERMODE. */
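/* A hedged example of the unpack/repack scheme (assuming a little-endian
   target; the constant is arbitrary): evaluating
   (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE unpacks
   the value into the byte array { 0x78, 0x56, 0x34, 0x12 }, selects the
   two low bytes, and repacks them as (const_int 0x5678).  */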
5562 static rtx
5563 simplify_immed_subreg (machine_mode outermode, rtx op,
5564 machine_mode innermode, unsigned int byte)
5566 enum {
5567 value_bit = 8,
5568 value_mask = (1 << value_bit) - 1
5570 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5571 int value_start;
5572 int i;
5573 int elem;
5575 int num_elem;
5576 rtx * elems;
5577 int elem_bitsize;
5578 rtx result_s;
5579 rtvec result_v = NULL;
5580 enum mode_class outer_class;
5581 machine_mode outer_submode;
5582 int max_bitsize;
5584 /* Some ports misuse CCmode. */
5585 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5586 return op;
5588 /* We have no way to represent a complex constant at the rtl level. */
5589 if (COMPLEX_MODE_P (outermode))
5590 return NULL_RTX;
5592 /* We support any size mode. */
5593 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5594 GET_MODE_BITSIZE (innermode));
5596 /* Unpack the value. */
5598 if (GET_CODE (op) == CONST_VECTOR)
5600 num_elem = CONST_VECTOR_NUNITS (op);
5601 elems = &CONST_VECTOR_ELT (op, 0);
5602 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5604 else
5606 num_elem = 1;
5607 elems = &op;
5608 elem_bitsize = max_bitsize;
5610 /* If this asserts, it is too complicated; reducing value_bit may help. */
5611 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5612 /* I don't know how to handle endianness of sub-units. */
5613 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5615 for (elem = 0; elem < num_elem; elem++)
5617 unsigned char * vp;
5618 rtx el = elems[elem];
5620 /* Vectors are kept in target memory order. (This is probably
5621 a mistake.) */
5623 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5624 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5625 / BITS_PER_UNIT);
5626 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5627 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5628 unsigned bytele = (subword_byte % UNITS_PER_WORD
5629 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5630 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5633 switch (GET_CODE (el))
5635 case CONST_INT:
5636 for (i = 0;
5637 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5638 i += value_bit)
5639 *vp++ = INTVAL (el) >> i;
5640 /* CONST_INTs are always logically sign-extended. */
5641 for (; i < elem_bitsize; i += value_bit)
5642 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5643 break;
5645 case CONST_WIDE_INT:
5647 rtx_mode_t val = std::make_pair (el, innermode);
5648 unsigned char extend = wi::sign_mask (val);
5650 for (i = 0; i < elem_bitsize; i += value_bit)
5651 *vp++ = wi::extract_uhwi (val, i, value_bit);
5652 for (; i < elem_bitsize; i += value_bit)
5653 *vp++ = extend;
5655 break;
5657 case CONST_DOUBLE:
5658 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5660 unsigned char extend = 0;
5661 /* If this triggers, someone should have generated a
5662 CONST_INT instead. */
5663 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5665 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5666 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5667 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5669 *vp++
5670 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5671 i += value_bit;
5674 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5675 extend = -1;
5676 for (; i < elem_bitsize; i += value_bit)
5677 *vp++ = extend;
5679 else
5681 /* This is big enough for anything on the platform. */
5682 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5683 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5685 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5686 gcc_assert (bitsize <= elem_bitsize);
5687 gcc_assert (bitsize % value_bit == 0);
5689 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5690 GET_MODE (el));
5692 /* real_to_target produces its result in words affected by
5693 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5694 and use WORDS_BIG_ENDIAN instead; see the documentation
5695 of SUBREG in rtl.texi. */
5696 for (i = 0; i < bitsize; i += value_bit)
5698 int ibase;
5699 if (WORDS_BIG_ENDIAN)
5700 ibase = bitsize - 1 - i;
5701 else
5702 ibase = i;
5703 *vp++ = tmp[ibase / 32] >> i % 32;
5706 /* It shouldn't matter what's done here, so fill it with
5707 zero. */
5708 for (; i < elem_bitsize; i += value_bit)
5709 *vp++ = 0;
5711 break;
5713 case CONST_FIXED:
5714 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5716 for (i = 0; i < elem_bitsize; i += value_bit)
5717 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5719 else
5721 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5722 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5723 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5724 i += value_bit)
5725 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5726 >> (i - HOST_BITS_PER_WIDE_INT);
5727 for (; i < elem_bitsize; i += value_bit)
5728 *vp++ = 0;
5730 break;
5732 default:
5733 gcc_unreachable ();
5737 /* Now, pick the right byte to start with. */
5738 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5739 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5740 will already have offset 0. */
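/* For instance, with an 8-byte inner value and a 4-byte outer mode on
   a big-endian target, SUBREG_BYTE 0 names the most significant half,
   which renumbers to little-endian byte 4 here.  (Illustrative sizes
   only.)  */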
5741 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5743 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5744 - byte);
5745 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5746 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5747 byte = (subword_byte % UNITS_PER_WORD
5748 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5751 /* BYTE should still be inside OP. (Note that BYTE is unsigned, so an
5752 adjustment that would have made it negative instead leaves it very large.) */
5753 gcc_assert (byte < GET_MODE_SIZE (innermode));
5755 /* Convert from bytes to chunks of size value_bit. */
5756 value_start = byte * (BITS_PER_UNIT / value_bit);
5758 /* Re-pack the value. */
5759 num_elem = GET_MODE_NUNITS (outermode);
5761 if (VECTOR_MODE_P (outermode))
5763 result_v = rtvec_alloc (num_elem);
5764 elems = &RTVEC_ELT (result_v, 0);
5766 else
5767 elems = &result_s;
5769 outer_submode = GET_MODE_INNER (outermode);
5770 outer_class = GET_MODE_CLASS (outer_submode);
5771 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5773 gcc_assert (elem_bitsize % value_bit == 0);
5774 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5776 for (elem = 0; elem < num_elem; elem++)
5778 unsigned char *vp;
5780 /* Vectors are stored in target memory order. (This is probably
5781 a mistake.) */
5783 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5784 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5785 / BITS_PER_UNIT);
5786 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5787 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5788 unsigned bytele = (subword_byte % UNITS_PER_WORD
5789 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5790 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5793 switch (outer_class)
5795 case MODE_INT:
5796 case MODE_PARTIAL_INT:
5798 int u;
5799 int base = 0;
5800 int units
5801 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5802 / HOST_BITS_PER_WIDE_INT;
5803 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5804 wide_int r;
5806 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5807 return NULL_RTX;
5808 for (u = 0; u < units; u++)
5810 unsigned HOST_WIDE_INT buf = 0;
5811 for (i = 0;
5812 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5813 i += value_bit)
5814 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5816 tmp[u] = buf;
5817 base += HOST_BITS_PER_WIDE_INT;
5819 r = wide_int::from_array (tmp, units,
5820 GET_MODE_PRECISION (outer_submode));
5821 #if TARGET_SUPPORTS_WIDE_INT == 0
5822 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5823 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5824 return NULL_RTX;
5825 #endif
5826 elems[elem] = immed_wide_int_const (r, outer_submode);
5828 break;
5830 case MODE_FLOAT:
5831 case MODE_DECIMAL_FLOAT:
5833 REAL_VALUE_TYPE r;
5834 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5836 /* real_from_target wants its input in words affected by
5837 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5838 and use WORDS_BIG_ENDIAN instead; see the documentation
5839 of SUBREG in rtl.texi. */
5840 for (i = 0; i < max_bitsize / 32; i++)
5841 tmp[i] = 0;
5842 for (i = 0; i < elem_bitsize; i += value_bit)
5844 int ibase;
5845 if (WORDS_BIG_ENDIAN)
5846 ibase = elem_bitsize - 1 - i;
5847 else
5848 ibase = i;
5849 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5852 real_from_target (&r, tmp, outer_submode);
5853 elems[elem] = const_double_from_real_value (r, outer_submode);
5855 break;
5857 case MODE_FRACT:
5858 case MODE_UFRACT:
5859 case MODE_ACCUM:
5860 case MODE_UACCUM:
5862 FIXED_VALUE_TYPE f;
5863 f.data.low = 0;
5864 f.data.high = 0;
5865 f.mode = outer_submode;
5867 for (i = 0;
5868 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5869 i += value_bit)
5870 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5871 for (; i < elem_bitsize; i += value_bit)
5872 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5873 << (i - HOST_BITS_PER_WIDE_INT));
5875 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5877 break;
5879 default:
5880 gcc_unreachable ();
5883 if (VECTOR_MODE_P (outermode))
5884 return gen_rtx_CONST_VECTOR (outermode, result_v);
5885 else
5886 return result_s;
5889 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5890 Return 0 if no simplifications are possible. */
5892 simplify_subreg (machine_mode outermode, rtx op,
5893 machine_mode innermode, unsigned int byte)
5895 /* Little bit of sanity checking. */
5896 gcc_assert (innermode != VOIDmode);
5897 gcc_assert (outermode != VOIDmode);
5898 gcc_assert (innermode != BLKmode);
5899 gcc_assert (outermode != BLKmode);
5901 gcc_assert (GET_MODE (op) == innermode
5902 || GET_MODE (op) == VOIDmode);
5904 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5905 return NULL_RTX;
5907 if (byte >= GET_MODE_SIZE (innermode))
5908 return NULL_RTX;
5910 if (outermode == innermode && !byte)
5911 return op;
5913 if (CONST_SCALAR_INT_P (op)
5914 || CONST_DOUBLE_AS_FLOAT_P (op)
5915 || GET_CODE (op) == CONST_FIXED
5916 || GET_CODE (op) == CONST_VECTOR)
5917 return simplify_immed_subreg (outermode, op, innermode, byte);
5919 /* Changing mode twice with SUBREG => just change it once,
5920 or not at all if changing back to OP's starting mode. */
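/* E.g. (subreg:HI (subreg:SI (reg:DI R) 0) 0) simplifies to
   (subreg:HI (reg:DI R) 0) here (offsets shown for little endian).  */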
5921 if (GET_CODE (op) == SUBREG)
5923 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5924 int final_offset = byte + SUBREG_BYTE (op);
5925 rtx newx;
5927 if (outermode == innermostmode
5928 && byte == 0 && SUBREG_BYTE (op) == 0)
5929 return SUBREG_REG (op);
5931 /* The SUBREG_BYTE represents the offset, as if the value were stored
5932 in memory. The irritating exception is a paradoxical subreg, where
5933 we define SUBREG_BYTE to be 0; on big-endian machines the value
5934 would otherwise be negative. For a moment, undo this exception. */
5935 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5937 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5938 if (WORDS_BIG_ENDIAN)
5939 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5940 if (BYTES_BIG_ENDIAN)
5941 final_offset += difference % UNITS_PER_WORD;
5943 if (SUBREG_BYTE (op) == 0
5944 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5946 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5947 if (WORDS_BIG_ENDIAN)
5948 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5949 if (BYTES_BIG_ENDIAN)
5950 final_offset += difference % UNITS_PER_WORD;
5953 /* See whether the resulting subreg will be paradoxical. */
5954 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5956 /* In nonparadoxical subregs we can't handle negative offsets. */
5957 if (final_offset < 0)
5958 return NULL_RTX;
5959 /* Bail out if the resulting subreg would be incorrect. */
5960 if (final_offset % GET_MODE_SIZE (outermode)
5961 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5962 return NULL_RTX;
5964 else
5966 int offset = 0;
5967 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5969 /* In a paradoxical subreg, see if we are still looking at the lower
5970 part. If so, our SUBREG_BYTE will be 0. */
5971 if (WORDS_BIG_ENDIAN)
5972 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5973 if (BYTES_BIG_ENDIAN)
5974 offset += difference % UNITS_PER_WORD;
5975 if (offset == final_offset)
5976 final_offset = 0;
5977 else
5978 return NULL_RTX;
5981 /* Recurse for further possible simplifications. */
5982 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5983 final_offset);
5984 if (newx)
5985 return newx;
5986 if (validate_subreg (outermode, innermostmode,
5987 SUBREG_REG (op), final_offset))
5989 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5990 if (SUBREG_PROMOTED_VAR_P (op)
5991 && SUBREG_PROMOTED_SIGN (op) >= 0
5992 && GET_MODE_CLASS (outermode) == MODE_INT
5993 && IN_RANGE (GET_MODE_SIZE (outermode),
5994 GET_MODE_SIZE (innermode),
5995 GET_MODE_SIZE (innermostmode))
5996 && subreg_lowpart_p (newx))
5998 SUBREG_PROMOTED_VAR_P (newx) = 1;
5999 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6001 return newx;
6003 return NULL_RTX;
6006 /* SUBREG of a hard register => just change the register number
6007 and/or mode. If the hard register is not valid in that mode,
6008 suppress this simplification. If the hard register is the stack,
6009 frame, or argument pointer, leave this as a SUBREG. */
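/* E.g. on a typical 64-bit little-endian target, the lowpart
   (subreg:SI (reg:DI hard-reg) 0) can simply become (reg:SI hard-reg)
   when SImode is valid in that register (illustrative case).  */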
6011 if (REG_P (op) && HARD_REGISTER_P (op))
6013 unsigned int regno, final_regno;
6015 regno = REGNO (op);
6016 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6017 if (HARD_REGISTER_NUM_P (final_regno))
6019 rtx x;
6020 int final_offset = byte;
6022 /* Adjust offset for paradoxical subregs. */
6023 if (byte == 0
6024 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6026 int difference = (GET_MODE_SIZE (innermode)
6027 - GET_MODE_SIZE (outermode));
6028 if (WORDS_BIG_ENDIAN)
6029 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6030 if (BYTES_BIG_ENDIAN)
6031 final_offset += difference % UNITS_PER_WORD;
6034 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6036 /* Propagate the original regno. We don't have any way to specify
6037 the offset inside the original regno, so do so only for the lowpart.
6038 The information is used only by alias analysis, which cannot
6039 grok a partial register anyway. */
6041 if (subreg_lowpart_offset (outermode, innermode) == byte)
6042 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6043 return x;
6047 /* If we have a SUBREG of a register that we are replacing and we are
6048 replacing it with a MEM, make a new MEM and try replacing the
6049 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6050 or if we would be widening it. */
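/* E.g. (subreg:SI (mem:DI ADDR) 4) can be rewritten as an SImode MEM
   at ADDR plus 4 via adjust_address_nv, provided ADDR is not
   mode-dependent (offset shown for illustration).  */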
6052 if (MEM_P (op)
6053 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6054 /* Allow splitting of volatile memory references in case we don't
6055 have an instruction to move the whole thing. */
6056 && (! MEM_VOLATILE_P (op)
6057 || ! have_insn_for (SET, innermode))
6058 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6059 return adjust_address_nv (op, outermode, byte);
6061 /* Handle complex values represented as CONCAT
6062 of real and imaginary part. */
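/* E.g. (subreg:SF (concat:SC RE IM) 0) simplifies to RE, and the
   subreg whose BYTE equals the size of SFmode selects IM instead.  */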
6063 if (GET_CODE (op) == CONCAT)
6065 unsigned int part_size, final_offset;
6066 rtx part, res;
6068 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6069 if (byte < part_size)
6071 part = XEXP (op, 0);
6072 final_offset = byte;
6074 else
6076 part = XEXP (op, 1);
6077 final_offset = byte - part_size;
6080 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6081 return NULL_RTX;
6083 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6084 if (res)
6085 return res;
6086 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6087 return gen_rtx_SUBREG (outermode, part, final_offset);
6088 return NULL_RTX;
6091 /* A SUBREG resulting from a zero extension may fold to zero if
6092 it extracts higher bits than the ZERO_EXTEND's source provides. */
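/* E.g. on a little-endian target, (subreg:SI (zero_extend:DI
   (reg:SI R)) 4) reads only the zero-filled upper half and therefore
   folds to (const_int 0).  */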
6093 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6095 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6096 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6097 return CONST0_RTX (outermode);
6100 if (SCALAR_INT_MODE_P (outermode)
6101 && SCALAR_INT_MODE_P (innermode)
6102 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6103 && byte == subreg_lowpart_offset (outermode, innermode))
6105 rtx tem = simplify_truncation (outermode, op, innermode);
6106 if (tem)
6107 return tem;
6110 return NULL_RTX;
6113 /* Make a SUBREG operation or equivalent if it folds. */
6116 simplify_gen_subreg (machine_mode outermode, rtx op,
6117 machine_mode innermode, unsigned int byte)
6119 rtx newx;
6121 newx = simplify_subreg (outermode, op, innermode, byte);
6122 if (newx)
6123 return newx;
6125 if (GET_CODE (op) == SUBREG
6126 || GET_CODE (op) == CONCAT
6127 || GET_MODE (op) == VOIDmode)
6128 return NULL_RTX;
6130 if (validate_subreg (outermode, innermode, op, byte))
6131 return gen_rtx_SUBREG (outermode, op, byte);
6133 return NULL_RTX;
6136 /* Generate a subreg that extracts the least significant part of EXPR
6137 (in mode INNER_MODE) as a value of mode OUTER_MODE. */
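/* E.g. lowpart_subreg (SImode, X, DImode) requests byte 0 on a
   little-endian target and byte 4 on a big-endian one, folding to a
   simpler form whenever simplify_gen_subreg can.  */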
6140 lowpart_subreg (machine_mode outer_mode, rtx expr,
6141 machine_mode inner_mode)
6143 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6144 subreg_lowpart_offset (outer_mode, inner_mode));
6147 /* Simplify X, an rtx expression.
6149 Return the simplified expression or NULL if no simplifications
6150 were possible.
6152 This is the preferred entry point into the simplification routines;
6153 however, we still allow passes to call the more specific routines.
6155 Right now GCC has three (yes, three) major bodies of RTL simplification
6156 code that need to be unified.
6158 1. fold_rtx in cse.c. This code uses various CSE specific
6159 information to aid in RTL simplification.
6161 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6162 it uses combine specific information to aid in RTL
6163 simplification.
6165 3. The routines in this file.
6168 Long term we want to only have one body of simplification code; to
6169 get to that state I recommend the following steps:
6171 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6172 that do not depend on pass-specific state into these routines.
6174 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6175 use this routine whenever possible.
6177 3. Allow for pass dependent state to be provided to these
6178 routines and add simplifications based on the pass dependent
6179 state. Remove code from cse.c & combine.c that becomes
6180 redundant/dead.
6182 It will take time, but ultimately the compiler will be easier to
6183 maintain and improve. It's totally silly that when we add a
6184 simplification it needs to be added in 4 places (3 for RTL
6185 simplification and 1 for tree simplification). */
6188 simplify_rtx (const_rtx x)
6190 const enum rtx_code code = GET_CODE (x);
6191 const machine_mode mode = GET_MODE (x);
6193 switch (GET_RTX_CLASS (code))
6195 case RTX_UNARY:
6196 return simplify_unary_operation (code, mode,
6197 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6198 case RTX_COMM_ARITH:
6199 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6200 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6202 /* Fall through.... */
6204 case RTX_BIN_ARITH:
6205 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6207 case RTX_TERNARY:
6208 case RTX_BITFIELD_OPS:
6209 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6210 XEXP (x, 0), XEXP (x, 1),
6211 XEXP (x, 2));
6213 case RTX_COMPARE:
6214 case RTX_COMM_COMPARE:
6215 return simplify_relational_operation (code, mode,
6216 ((GET_MODE (XEXP (x, 0))
6217 != VOIDmode)
6218 ? GET_MODE (XEXP (x, 0))
6219 : GET_MODE (XEXP (x, 1))),
6220 XEXP (x, 0),
6221 XEXP (x, 1));
6223 case RTX_EXTRA:
6224 if (code == SUBREG)
6225 return simplify_subreg (mode, SUBREG_REG (x),
6226 GET_MODE (SUBREG_REG (x)),
6227 SUBREG_BYTE (x));
6228 break;
6230 case RTX_OBJ:
6231 if (code == LO_SUM)
6233 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6234 if (GET_CODE (XEXP (x, 0)) == HIGH
6235 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6236 return XEXP (x, 1);
6238 break;
6240 default:
6241 break;
6243 return NULL;