/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
#include "internal-fn.h"
#include "langhooks.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

static rtx emit_conditional_move_1 (rtx, rtx, rtx, rtx, machine_mode);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return true.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return false.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
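/* Illustrative sketch, not part of the original source: if TARGET = OP0 + OP1
   was expanded into a multi-insn sequence whose last insn is

	(set (reg:SI 100) (plus:SI (reg:SI 200) (reg:SI 102)))

   where reg 200 holds an intermediate value, this function attaches a note
   of the form

	(expr_list:REG_EQUAL (plus:SI (reg:SI 101) (reg:SI 102)))

   to that last insn, recording the overall result in terms of the original
   operands so CSE and friends can recognize it.  All register numbers here
   are invented for the example.  */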
static bool
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return true;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return true;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return true;
	}
      return false;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return true;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return true;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
	  {
	    note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
	    if (GET_MODE_UNIT_SIZE (op0_mode)
		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, op0_mode);
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, op0_mode);
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return true;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */
static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is true if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
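/* Illustrative sketch, not part of the original source: for a QImode AND
   widened to SImode, the caller only reads the low 8 bits of the result,
   so NO_EXTEND can be true and the upper bits may hold garbage:

	si0 = (SImode) qi0;		bits 8..31 undefined
	si1 = (SImode) qi1;		bits 8..31 undefined
	res = lowpart (si0 & si1);	only bits 0..7 are used

   A right shift pulls high-order bits down into the used part of the
   result, which is why the comment above excludes it.  */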
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, bool no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
				nops	OP0	OP1	WIDE_OP
   widening-sum			2	oprnd0	-	oprnd1
   widening-dot-product		3	oprnd0	oprnd1	oprnd2
   widening-mult		2	oprnd0	oprnd1	-
   type-promotion (vec-unpack)	1	oprnd0	-	-  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  oprnd1 = nops >= 2 ? ops->op1 : NULL_TREE;
  oprnd2 = nops >= 3 ? ops->op2 : NULL_TREE;

  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
	    || ops->code == VEC_UNPACK_LO_EXPR)
	   && VECTOR_BOOLEAN_TYPE_P (ops->type)
	   && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
	   && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
	   && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
	 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
	 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
	 the pattern number of elements in the wider vector.  */
      widen_pattern_optab
	= (ops->code == VEC_UNPACK_HI_EXPR
	   ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else if (ops->code == DOT_PROD_EXPR)
    {
      enum optab_subtype subtype = optab_default;
      signop sign1 = TYPE_SIGN (TREE_TYPE (oprnd0));
      signop sign2 = TYPE_SIGN (TREE_TYPE (oprnd1));
      if (sign1 == sign2)
	;
      else if (sign1 == SIGNED && sign2 == UNSIGNED)
	{
	  subtype = optab_vector_mixed_sign;
	  /* Same as optab_vector_mixed_sign but flip the operands.  */
	  std::swap (op0, op1);
	}
      else if (sign1 == UNSIGNED && sign2 == SIGNED)
	subtype = optab_vector_mixed_sign;

      widen_pattern_optab
	= optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), subtype);
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
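/* Illustrative sketch, not part of the original source: for a 64-bit
   logical right shift by COUNT >= 32 on a 32-bit target, the caller has
   already computed SUPERWORD_OP1 = COUNT - 32 and this routine emits

	into_target  = outof_input >> (COUNT - 32);
	outof_target = 0;	(copies of the sign bit for ashr)

   so only one real word-mode shift is needed.  */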
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
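/* Illustrative sketch, not part of the original source: for a 64-bit left
   shift by N (0 < N < 32) on a 32-bit target, OUTOF is the low word and
   INTO is the high word, and the routine below computes

	into_target  = (into_input << N) | (outof_input >> (32 - N));
	outof_target = outof_input << N;

   The (outof_input >> (32 - N)) term is the CARRIES value; the code takes
   care to form the 32 - N shift without ever shifting by exactly
   BITS_PER_WORD, whose behavior may be undefined or a no-op.  */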
static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
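/* Illustrative sketch, not part of the original source, in C-like
   pseudocode:

	sub    = doubleword shifted by SUBWORD_OP1;	(both halves)
	super  = doubleword shifted by SUPERWORD_OP1;	(both halves)
	result = (CMP_CODE CMP1 CMP2) ? sub : super;

   Both alternatives are computed unconditionally and conditional moves
   select between them, avoiding a branch.  */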
static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, { cmp_code, cmp1, cmp2, op1_mode },
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target,
				{ cmp_code, cmp1, cmp2, op1_mode },
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
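/* Illustrative sketch, not part of the original source: on a 32-bit
   target, a 64-bit left shift by a variable N (known nonzero) expands to

	if (N < 32)
	  {
	    hi = (hi << N) | (lo >> (32 - N));	subword case
	    lo = lo << N;
	  }
	else
	  {
	    hi = lo << (N - 32);		superword case
	    lo = 0;
	  }

   selected by a compile-time test, conditional moves or branches, in
   that order of preference.  */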
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);

      superword_op1 = op1;
      cmp_code = EQ;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);

      superword_op1 = cmp1;
      cmp_code = LT;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label,
			   profile_probability::uninitialized ());

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			 _______________________
			[__op0_high_|__op0_low__]
			 _______________________
        *		[__op1_high_|__op1_low__]
        _______________________________________________
			 _______________________
    (1)			[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
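/* Illustrative worked example, not part of the original source: with
   8-bit words, take op0_low = 0x80 (128 unsigned, -128 signed) and
   op1_low = 0x03.  A signed widening multiply yields -384 = 0xFE80,
   whereas the desired unsigned product is 384 = 0x0180.  The difference
   is exactly op1_low * 2**8 = 0x0300, matching the correction described
   above; the (op0_low >> 7) trick folds that op1_low into the
   high-word term.  */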
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Subroutine of expand_binop.  Optimize unsigned double-word OP0 % OP1 for
   constant OP1.  If for some bit in [BITS_PER_WORD / 2, BITS_PER_WORD] range
   (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
   computed in word-mode as ((OP0 & (bit - 1)) + ((OP0 >> bit) & (bit - 1))
   + (OP0 >> (2 * bit))) % OP1.  Whether we need to sum 2, 3 or 4 values
   depends on the bit value, if 2, then carry from the addition needs to be
   added too, i.e. like:
   sum += __builtin_add_overflow (low, high, &sum)

   Optimize signed double-word OP0 % OP1 similarly, just apply some correction
   factor to the sum before doing unsigned remainder, in the form of
   sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
   then perform unsigned
   remainder = sum % OP1;
   and finally
   remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1);  */
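/* Illustrative worked example, not part of the original source: with
   32-bit words, OP1 = 3 and bit = 32, (1ULL << 32) % 3 == 1, so each
   2**32 quantum of OP0 contributes exactly 1 to the remainder.  For a
   64-bit OP0 this reduces to one word-mode modulo:

	lo = OP0 & 0xffffffff;  hi = OP0 >> 32;
	sum = lo + hi;			(plus the carry, if any)
	remainder = sum % 3;

   E.g. OP0 = 10000000000: lo = 1410065408, hi = 2, sum = 1410065410,
   and 1410065410 % 3 == 1 == 10000000000 % 3.  */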
static rtx
expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
{
  if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
    return NULL_RTX;

  rtx_insn *last = get_last_insn ();
  for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
    {
      wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
      if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
	continue;
      rtx sum = NULL_RTX, mask = NULL_RTX;
      if (bit == BITS_PER_WORD)
	{
	  /* For signed modulo we need to add correction to the sum
	     and that might again overflow.  */
	  if (!unsignedp)
	    continue;
	  if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
	    continue;
	  tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
	  if (wtype == NULL_TREE)
	    continue;
	  tree ctype = build_complex_type (wtype);
	  if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
	    continue;
	  machine_mode cmode = TYPE_MODE (ctype);
	  rtx op00 = operand_subword_force (op0, 0, mode);
	  rtx op01 = operand_subword_force (op0, 1, mode);
	  rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
				     gen_reg_rtx (word_mode));
	  tree lhs = make_tree (ctype, cres);
	  tree arg0 = make_tree (wtype, op00);
	  tree arg1 = make_tree (wtype, op01);
	  expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
				  arg1, true, true, true, false, NULL);
	  sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
				     XEXP (cres, 1), NULL_RTX, 1,
				     OPTAB_DIRECT);
	  if (sum == NULL_RTX)
	    return NULL_RTX;
	}
      else
	{
	  /* Code below uses GEN_INT, so we need the masks to be representable
	     in HOST_WIDE_INTs.  */
	  if (bit >= HOST_BITS_PER_WIDE_INT)
	    continue;
	  /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
	     overflow.  Consider 64-bit -1ULL for word size 32, if we add
	     0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1.  */
	  if (bit == BITS_PER_WORD - 1)
	    continue;

	  int count = (2 * BITS_PER_WORD + bit - 1) / bit;
	  rtx sum_corr = NULL_RTX;

	  if (!unsignedp)
	    {
	      /* For signed modulo, compute it as unsigned modulo of
		 sum with a correction added to it if OP0 is negative,
		 such that the result can be computed as unsigned
		 remainder
		 + ((OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1)).  */
	      w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
	      wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
	      wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
	      /* wmod2 == -wmod1.  */
	      wmod2 = wmod2 + (INTVAL (op1) - 1);
	      if (wi::ne_p (wmod1, wmod2))
		{
		  wide_int wcorr = wmod2 - wmod1;
		  if (wi::neg_p (wcorr))
		    wcorr = wcorr + INTVAL (op1);
		  /* Now verify if the count sums can't overflow, and punt
		     if they could.  */
		  w = wi::mask (bit, false, 2 * BITS_PER_WORD);
		  w = w * (count - 1);
		  w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
				    false, 2 * BITS_PER_WORD);
		  w = w + wcorr;
		  w = wi::lrshift (w, BITS_PER_WORD);
		  if (wi::ne_p (w, 0))
		    continue;

		  mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
						mode);
		  mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					      GEN_INT (BITS_PER_WORD - 1),
					      NULL_RTX, 0, OPTAB_DIRECT);
		  if (mask == NULL_RTX)
		    return NULL_RTX;
		  sum_corr = immed_wide_int_const (wcorr, word_mode);
		  sum_corr = expand_simple_binop (word_mode, AND, mask,
						  sum_corr, NULL_RTX, 1,
						  OPTAB_DIRECT);
		  if (sum_corr == NULL_RTX)
		    return NULL_RTX;
		}
	    }

	  for (int i = 0; i < count; i++)
	    {
	      rtx v = op0;
	      if (i)
		v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      v = lowpart_subreg (word_mode, v, mode);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (i != count - 1)
		v = expand_simple_binop (word_mode, AND, v,
					 GEN_INT ((HOST_WIDE_INT_1U << bit)
						  - 1), NULL_RTX, 1,
					 OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (sum == NULL_RTX)
		sum = v;
	      else
		sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
					   1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	  if (sum_corr)
	    {
	      sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	}
      rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum,
				     gen_int_mode (INTVAL (op1), word_mode),
				     NULL_RTX, 1, OPTAB_DIRECT);
      if (remainder == NULL_RTX)
	return NULL_RTX;

      if (!unsignedp)
	{
	  if (mask == NULL_RTX)
	    {
	      mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
					    mode);
	      mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					  GEN_INT (BITS_PER_WORD - 1),
					  NULL_RTX, 0, OPTAB_DIRECT);
	      if (mask == NULL_RTX)
		return NULL_RTX;
	    }
	  mask = expand_simple_binop (word_mode, AND, mask,
				      gen_int_mode (1 - INTVAL (op1),
						    word_mode),
				      NULL_RTX, 1, OPTAB_DIRECT);
	  if (mask == NULL_RTX)
	    return NULL_RTX;
	  remainder = expand_simple_binop (word_mode, PLUS, remainder,
					   mask, NULL_RTX, 1, OPTAB_DIRECT);
	  if (remainder == NULL_RTX)
	    return NULL_RTX;
	}

      remainder = convert_modes (mode, word_mode, remainder, unsignedp);
      /* Punt if we need any library calls.  */
      if (last)
	last = NEXT_INSN (last);
      else
	last = get_insns ();
      for (; last; last = NEXT_INSN (last))
	if (CALL_P (last))
	  return NULL_RTX;
      return remainder;
    }
  return NULL_RTX;
}
/* Similarly to the above function, but compute both quotient and remainder.
   Quotient can be computed from the remainder as:
	rem = op0 % op1;  // Handled using expand_doubleword_mod
	quot = (op0 - rem) * inv; // inv is multiplicative inverse of op1
				  // modulo 2 * BITS_PER_WORD

   We can also handle cases where op1 is a multiple of power of two constant
   and constant handled by expand_doubleword_mod.
	op11 = 1 << __builtin_ctz (op1);
	op12 = op1 / op11;
	rem1 = op0 % op12;  // Handled using expand_doubleword_mod
	quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse of op12
				    // modulo 2 * BITS_PER_WORD
	rem = (quot1 % op11) * op12 + rem1;
	quot = quot1 / op11;  */
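/* Illustrative worked example, not part of the original source: with
   8-bit words (a 16-bit doubleword) and op1 = 3, the multiplicative
   inverse of 3 modulo 2**16 is 43691, since 3 * 43691 == 2 * 65536 + 1.
   For op0 = 100: rem = 1, and (100 - 1) * 43691 mod 65536
   == 4325409 mod 65536 == 33, which is indeed 100 / 3.  The subtraction
   of the remainder makes the dividend exactly divisible, so the modular
   inverse recovers the true quotient.  */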
static rtx
expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
			  bool unsignedp)
{
  *rem = NULL_RTX;

  /* Negative dividend should have been optimized into positive,
     similarly modulo by 1 and modulo by power of two is optimized
     differently too.  */
  if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
    return NULL_RTX;

  rtx op11 = const1_rtx;
  rtx op12 = op1;
  if ((INTVAL (op1) & 1) == 0)
    {
      int bit = ctz_hwi (INTVAL (op1));
      op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
      op12 = GEN_INT (INTVAL (op1) >> bit);
    }

  rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
  if (rem1 == NULL_RTX)
    return NULL_RTX;

  int prec = 2 * BITS_PER_WORD;
  wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
  wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
  wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
  rtx inv = immed_wide_int_const (m, mode);

  rtx_insn *last = get_last_insn ();
  rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
				   NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  quot1 = expand_simple_binop (mode, MULT, quot1, inv,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  if (op11 != const1_rtx)
    {
      rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11,
				NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11,
				 NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (quot2 == NULL_RTX)
	return NULL_RTX;

      rem1 = rem2;
      quot1 = quot2;
    }

  /* Punt if we need any library calls.  */
  if (last)
    last = NEXT_INSN (last);
  else
    last = get_insns ();
  for (; last; last = NEXT_INSN (last))
    if (CALL_P (last))
      return NULL_RTX;

  *rem = rem1;
  return quot1;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab
	  || binoptab == vec_widen_sadd_optab
	  || binoptab == vec_widen_uadd_optab
	  || binoptab == vec_widen_sadd_hi_optab
	  || binoptab == vec_widen_sadd_lo_optab
	  || binoptab == vec_widen_uadd_hi_optab
	  || binoptab == vec_widen_uadd_lo_optab
	  || binoptab == vec_widen_sadd_even_optab
	  || binoptab == vec_widen_sadd_odd_optab
	  || binoptab == vec_widen_uadd_even_optab
	  || binoptab == vec_widen_uadd_odd_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */
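/* Illustrative sketch, not part of the original source: on a typical
   RISC target an AND with 0xff fits the instruction's immediate field,
   so the constant costs nothing extra as an operand, while a constant
   like 0x1234567800000000 may take several insns to materialize.  In
   the latter case the cost comparison below forces the constant into a
   register so later passes can share a single materialization.  */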
static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value, mode0))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
	{
	  machine_mode from_mode = widened_mode (mode, op0, op1);
	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
	}
      else
	icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
					target, unsignedp, methods, last);
	  if (temp)
	    return temp;
	}
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
	 one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
	op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab != unknown_optab
	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    bool no_extend = false;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = true;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || reg_overlap_mentioned_p (target, op0)
	  || reg_overlap_mentioned_p (target, op1)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
	op0_mode = int_mode;
      if (op1_mode == VOIDmode)
	op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, op0_mode),
				operand_subword_force (op1, i, op1_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || reg_overlap_mentioned_p (target, op0)
	      || reg_overlap_mentioned_p (target, op1)
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, int_mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

	  outof_input = operand_subword_force (op0, outof_word, int_mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }
1846 /* Synthesize double word rotates from single word shifts. */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || reg_overlap_mentioned_p (target, op0)
	  || reg_overlap_mentioned_p (target, op1)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = target;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  HOST_WIDE_INT first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = shift_count - BITS_PER_WORD;
	      second_shift_count = 2 * BITS_PER_WORD - shift_count;
	    }
	  else
	    {
	      first_shift_count = BITS_PER_WORD - shift_count;
	      second_shift_count = shift_count;
	    }
	  rtx first_shift_count_rtx
	    = gen_int_shift_amount (word_mode, first_shift_count);
	  rtx second_shift_count_rtx
	    = gen_int_shift_amount (word_mode, second_shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }
  /* These can be done a word at a time by propagating carries.  */
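  /* For a two-word addition this produces the familiar sequence
	lo = lo0 + lo1;		carry = (unsigned) lo < (unsigned) lo0;
	hi = hi0 + hi1 + carry;
     with the carry bit materialized by emit_store_flag_force below.  */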
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
	  rtx op0_piece = operand_subword_force (xop0, index, int_mode);
	  rtx op1_piece = operand_subword_force (xop1, index, int_mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx_insn *temp = emit_move_insn (target, xtarget);

	      set_dst_reg_note (temp, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						int_mode, copy_rtx (xop0),
						copy_rtx (xop1)),
				target);
	    }
	  else
	    target = xtarget;

	  return target;
	}
      else
	delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */
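  /* Writing op0 = hi0 * W + lo0 and op1 = hi1 * W + lo1, where W is
     2**BITS_PER_WORD, the product modulo W*W is
	umul_widen (lo0, lo1) + W * (hi0 * lo1 + lo0 * hi1);
     only the low-part multiply needs a double-word result, the two
     cross products are ordinary word-mode multiplications.  */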
  if (binoptab == smul_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (int_mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
	      != CODE_FOR_nothing))
	{
	  product = expand_doubleword_mult (int_mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
	    {
	      rtx_insn *move = emit_move_insn (target ? target : product,
					       product);
	      set_dst_reg_note (move,
				REG_EQUAL,
				gen_rtx_fmt_ee (MULT, int_mode,
						copy_rtx (op0),
						copy_rtx (op1)),
				target ? target : product);
	    }
	  return product;
	}
    }
  /* Attempt to synthesize double word modulo by constant divisor.  */
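  /* The idea (see expand_doubleword_mod for the exact conditions) is
     that for a suitable K, 2**K is congruent to 1 modulo the divisor,
     so the input can be reduced by summing K-bit chunks in word mode;
     e.g. every even power of 2 is 1 mod 3.  expand_doubleword_divmod
     then recovers the quotient from the remainder.  */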
  if ((binoptab == umod_optab
       || binoptab == smod_optab
       || binoptab == udiv_optab
       || binoptab == sdiv_optab)
      && optimize
      && CONST_INT_P (op1)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler ((binoptab == umod_optab || binoptab == udiv_optab)
			? udivmod_optab : sdivmod_optab,
			int_mode) == CODE_FOR_nothing
      && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      rtx res = NULL_RTX;
      if ((binoptab == umod_optab || binoptab == smod_optab)
	  && (INTVAL (op1) & 1) == 0)
	res = expand_doubleword_mod (int_mode, op0, op1,
				     binoptab == umod_optab);
      else
	{
	  rtx quot = expand_doubleword_divmod (int_mode, op0, op1, &res,
					       binoptab == umod_optab
					       || binoptab == udiv_optab);
	  if (quot == NULL_RTX)
	    res = NULL_RTX;
	  else if (binoptab == udiv_optab || binoptab == sdiv_optab)
	    res = quot;
	}
      if (res != NULL_RTX)
	{
	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
	    {
	      rtx_insn *move = emit_move_insn (target ? target : res,
					       res);
	      set_dst_reg_note (move, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						int_mode, copy_rtx (op0), op1),
				target ? target : res);
	    }
	  return res;
	}
      else
	delete_insns_since (last);
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      /* This code doesn't make sense for conversion optabs, since we
	 wouldn't then want to extend the operands to be the same size
	 as the result.  */
      gcc_assert (!convert_optab_p (binoptab));
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      bool no_extend = false;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = true;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns true if this operation can be performed; false if not.  */

bool
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return true;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return true;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return false;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns true if this operation can be performed; false if not.  */

bool
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
      create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
	return true;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return true;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return false;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
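/* E.g. a clz:SI on a target that only provides clz:DI becomes
	(clz:DI (zero_extend:DI x)) - 32.  */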
static rtx
widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
{
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0)
	    target = gen_reg_rtx (mode);
	  xop0 = widen_operand (op0, wider_mode, mode,
				unoptab != clrsb_optab, false);
	  temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
			      unoptab != clrsb_optab);
	  if (temp != 0)
	    temp = expand_binop
	      (wider_mode, sub_optab, temp,
	       gen_int_mode (GET_MODE_PRECISION (wider_mode)
			     - GET_MODE_PRECISION (mode),
			     wider_mode),
	       target, true, OPTAB_DIRECT);

	  if (temp == 0)
	    delete_insns_since (last);

	  return temp;
	}
    }
  return 0;
}
/* Attempt to emit (clrsb:mode op0) as
   (plus:mode (clz:mode (xor:mode op0 (ashr:mode op0 (const_int prec-1))))
	      (const_int -1))
   if CLZ_DEFINED_VALUE_AT_ZERO (mode, val) is 2 and val is prec,
   as
   (clz:mode (ior:mode (xor:mode (ashl:mode op0 (const_int 1))
				 (ashr:mode op0 (const_int prec-1)))
		       (const_int 1)))
   otherwise.  */
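/* E.g. in a 32-bit mode the first form computes
	clrsb (x) == clz (x ^ (x >> 31)) - 1,
   which is correct at 0 and -1 exactly because clz is defined to return
   the precision there; the second form ORs in a low set bit so that the
   clz operand is never zero.  */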
static rtx
expand_clrsb_using_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  if (optimize_insn_for_size_p ()
      || optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return NULL_RTX;

  HOST_WIDE_INT val = 0;
  if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) != 2
      || val != GET_MODE_PRECISION (mode))
    val = 0;

  start_sequence ();

  rtx temp2 = op0;
  if (val == 0)
    {
      temp2 = expand_binop (mode, ashl_optab, op0, const1_rtx,
			    NULL_RTX, 0, OPTAB_DIRECT);
      if (temp2 == NULL_RTX)
	{
	fail:
	  end_sequence ();
	  return NULL_RTX;
	}
    }

  rtx temp = expand_binop (mode, ashr_optab, op0,
			   GEN_INT (GET_MODE_PRECISION (mode) - 1),
			   NULL_RTX, 0, OPTAB_DIRECT);
  if (temp == NULL_RTX)
    goto fail;

  temp = expand_binop (mode, xor_optab, temp2, temp, NULL_RTX, 0,
		       OPTAB_DIRECT);
  if (temp == NULL_RTX)
    goto fail;

  if (val == 0)
    {
      temp = expand_binop (mode, ior_optab, temp, const1_rtx,
			   NULL_RTX, 0, OPTAB_DIRECT);
      if (temp == NULL_RTX)
	goto fail;
    }

  temp = expand_unop_direct (mode, clz_optab, temp, val ? NULL_RTX : target,
			     true);
  if (temp == NULL_RTX)
    goto fail;

  if (val)
    {
      temp = expand_binop (mode, add_optab, temp, constm1_rtx,
			   target, 0, OPTAB_DIRECT);
      if (temp == NULL_RTX)
	goto fail;
    }

  rtx_insn *seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CLRSB, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
static rtx expand_ffs (scalar_int_mode, rtx, rtx);
/* Try calculating clz, ctz or ffs of a double-word quantity as two clz, ctz or
   ffs operations on word-sized quantities, choosing which based on whether the
   high (for clz) or low (for ctz and ffs) word is nonzero.  */
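/* E.g. for a 64-bit clz built from 32-bit words:
	clz64 (x) == (hi != 0 ? clz32 (hi) : 32 + clz32 (lo));
   for ctz and ffs the roles of the two words are swapped.  */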
static rtx
expand_doubleword_clz_ctz_ffs (scalar_int_mode mode, rtx op0, rtx target,
			       optab unoptab)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;
  int addend = 0;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  if (unoptab != clz_optab)
    std::swap (subhi, sublo);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  if (optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    temp = expand_unop_direct (word_mode, unoptab, subhi, result, true);
  else
    {
      gcc_assert (unoptab == ffs_optab);
      temp = expand_ffs (word_mode, subhi, result);
    }
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  Similarly for ctz/ffs of the high word,
     except that ffs should be 0 when both words are zero.  */
  emit_label (hi0_label);

  if (unoptab == ffs_optab)
    {
      convert_move (result, const0_rtx, true);
      emit_cmp_and_jump_insns (sublo, CONST0_RTX (word_mode), EQ, 0,
			       word_mode, true, after_label);
    }

  if (optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    temp = expand_unop_direct (word_mode, unoptab, sublo, NULL_RTX, true);
  else
    {
      gcc_assert (unoptab == ffs_optab);
      temp = expand_unop_direct (word_mode, ctz_optab, sublo, NULL_RTX, true);
      addend = 1;
    }

  if (!temp)
    goto fail;

  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode) + addend,
				     word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, optab_to_code (unoptab), xop0, NULL_RTX, mode);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */
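/* I.e. popcount64 (x) == popcount32 (lo) + popcount32 (hi).  */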
static rtx
expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 0, mode), NULL_RTX,
			   true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 1, mode), NULL_RTX,
			   true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
  emit_insn (seq);
  return t;
}
/* Try calculating
	(parity:narrow x)
   as
	(parity:narrow (low (x) ^ high (x)))  */
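/* Every bit of x feeds exactly one bit of low (x) ^ high (x), so the
   XOR preserves the overall parity.  */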
static rtx
expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
			operand_subword_force (op0, 0, mode),
			operand_subword_force (op0, 1, mode),
			NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
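/* E.g. a bswap:HI via bswap:SI leaves the two interesting bytes in the
   high half of the wide result, so they are shifted back down by 16.  */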
static rtx
widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx x;
  rtx_insn *last;
  opt_scalar_int_mode wider_mode_iter;

  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    if (optab_handler (bswap_optab, wider_mode_iter.require ())
	!= CODE_FOR_nothing)
      break;

  if (!wider_mode_iter.exists ())
    return NULL_RTX;

  scalar_int_mode wider_mode = wider_mode_iter.require ();
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      GET_MODE_BITSIZE (wider_mode)
		      - GET_MODE_BITSIZE (mode),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
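/* E.g. parity (0xb) == popcount (0xb) & 1 == 3 & 1 == 1.  */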
static rtx
expand_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_MODE_FROM (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0 || GET_MODE (target) != wider_mode)
	    target = gen_reg_rtx (wider_mode);

	  xop0 = widen_operand (op0, wider_mode, mode, true, false);
	  temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
			      true);
	  if (temp != 0)
	    temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				 target, true, OPTAB_DIRECT);

	  if (temp)
	    {
	      if (mclass != MODE_INT
		  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		return convert_to_mode (mode, temp, 0);
	      else
		return gen_lowpart (mode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
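/* E.g. in SImode with x == 0x14: x & -x == 0x4, clz gives 29, and
   31 - 29 == 2 == ctz (x).  */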
static rtx
expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
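/* E.g. ffs (0x18) == ctz (0x18) + 1 == 4, and the test and branch below
   forces the intermediate result to -1 when OP0 is zero so that the
   final increment yields ffs (0) == 0.  */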
static rtx
expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
			   machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
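/* E.g. for IEEE single precision, negation is an XOR of the bit image
   with 0x80000000 and absolute value is an AND with 0x7fffffff.  */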
static rtx
expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  scalar_int_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || reg_overlap_mentioned_p (target, op0)
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX, mode))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  scalar_float_mode float_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_clz_ctz_ffs (int_mode, op0, target,
						    unoptab);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;
	  temp = expand_clrsb_using_clz (int_mode, op0, target);
	  if (temp)
	    return temp;
	}
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (int_mode, op0, target);
      if (temp)
	return temp;
    }

  if (unoptab == parity_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }
  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
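      /* E.g. bswap:HI (0xab12) == 0x12ab, which equals a rotate by 8 in
	 either direction.  */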
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  temp1 = expand_binop (mode, ashl_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_bswap (int_mode, op0, target);
	  if (temp)
	    return temp;

	  /* We do not provide a 128-bit bswap in libgcc so force the use of
	     a double bswap for 64-bit targets.  */
	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && (UNITS_PER_WORD == 8
		  || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
	    {
	      temp = expand_doubleword_bswap (mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */
	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      if (target == 0
	  || target == op0
	  || reg_overlap_mentioned_p (target, op0)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, int_mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  /* Emit ~op0 as op0 ^ -1.  */
  if (unoptab == one_cmpl_optab
      && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
      && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
    {
      temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
			   target, unsignedp, OPTAB_DIRECT);
      if (temp)
	return temp;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (is_a <scalar_float_mode> (mode, &float_mode))
	{
	  temp = expand_absneg_bit (NEG, float_mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ffs (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ctz (int_mode, op0, target);
      if (temp)
	return temp;
    }

  if ((unoptab == ctz_optab || unoptab == ffs_optab)
      && optimize_insn_for_speed_p ()
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (ctz_optab, word_mode) != CODE_FOR_nothing))
    {
      temp = expand_doubleword_clz_ctz_ffs (int_mode, op0, target, unoptab);
      if (temp)
	return temp;
    }
 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
	eq_value = NULL_RTX;
      else
	{
	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
	  if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
	  else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (ZERO_EXTEND,
					   outmode, eq_value, mode);
	}
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  temp = expand_binop
		    (wider_mode, sub_optab, temp,
		     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
				   - GET_MODE_PRECISION (int_mode),
				   wider_mode),
		     target, true, OPTAB_DIRECT);
		}

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
			      == GET_MODE_BITSIZE (wider_int_mode)
			      && GET_MODE_PRECISION (int_mode)
				 == GET_MODE_BITSIZE (int_mode));

		  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
				       GET_MODE_BITSIZE (wider_int_mode)
				       - GET_MODE_BITSIZE (int_mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  scalar_float_mode float_mode;
  if (is_a <scalar_float_mode> (mode, &float_mode))
    {
      temp = expand_absneg_bit (ABS, float_mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }
  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
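  /* E.g. for 32 bits, mask = x >> 31 is 0 or -1, and
	(mask ^ x) - mask == (x < 0 ? -x : x).  */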
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (int_mode,
			     result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL, op1,
			   profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }
  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */
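  /* I.e. mask = x >> (W-1); mask ^ x gives x when x >= 0 and ~x when
     x < 0.  */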
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  if (!int_mode_for_mode (mode).exists (&imode))
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
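/* I.e. result = (op0 & ~signbit_mask) | (op1 & signbit_mask), processed
   one word at a time for multiword floats.  */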
static rtx
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || reg_overlap_mentioned_p (target, op0)
      || reg_overlap_mentioned_p (target, op1)
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_wide_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  scalar_float_mode mode;
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  mode = as_a <scalar_float_mode> (GET_MODE (op0));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  class expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
		    GET_MODE (op0));

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
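
/* Illustrative sketch (not part of GCC): emit_unop_insn below is the
   asserting wrapper; a caller that can recover from failure uses the
   maybe_ variant directly, e.g. for a NEG whose ICODE was already
   looked up via optab_handler (neg_optab, mode):

     if (!maybe_emit_unop_insn (icode, target, op0, NEG))
       ;  // try a wider mode or a library call instead
*/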
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}

struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};
/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */

static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (insn, no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
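
/* Illustrative sketch (not part of GCC): the usual emission pattern,
   as used by prepare_float_lib_cmp and expand_float below; LIBFUNC,
   TARGET and EQUIV are assumed to be set up by the caller:

     start_sequence ();
     rtx value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					  mode, x, mode, y, mode);
     rtx_insn *insns = get_insns ();
     end_sequence ();
     emit_libcall_block (insns, target, value, equiv);
*/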
/* True if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

bool
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return true;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return true;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return true;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return false;
}
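
/* Illustrative sketch (not part of GCC): dojump-style callers query
   can_compare_p before deciding how to split a comparison, e.g.:

     if (can_compare_p (GT, word_mode, ccp_jump))
       ;  // a direct conditional branch on word_mode is available
*/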
/* Return whether RTL code CODE corresponds to an unsigned optab.  */

static bool
unsigned_optab_p (enum rtx_code code)
{
  return code == LTU || code == LEU || code == GTU || code == GEU;
}

/* Return whether the backend-emitted comparison for code CODE, comparing
   operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
   operand OPNO of pattern ICODE.  */

static bool
insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
			  enum rtx_code code, machine_mode mask_mode,
			  machine_mode value_mode)
{
  rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
  return insn_operand_matches (icode, opno, test);
}
/* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
   for code CODE, comparing operands of mode VALUE_MODE and producing a result
   with MASK_MODE.  */

bool
can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
		       machine_mode mask_mode)
{
  enum insn_code icode
    = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
}

/* Return whether the backend can emit a vector comparison (vcond/vcondu) for
   code CODE, comparing operands of mode CMP_OP_MODE and producing a result
   with VALUE_MODE.  */

bool
can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
		     machine_mode cmp_op_mode)
{
  enum insn_code icode
    = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
}
/* Return whether the backend can emit vector set instructions for inserting
   an element into a vector at a variable index position.  */

bool
can_vec_set_var_idx_p (machine_mode vec_mode)
{
  if (!VECTOR_MODE_P (vec_mode))
    return false;

  machine_mode inner_mode = GET_MODE_INNER (vec_mode);

  rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);

  enum insn_code icode = optab_handler (vec_set_optab, vec_mode);

  const struct insn_data_d *data = &insn_data[icode];
  machine_mode idx_mode = data->operand[2].mode;

  rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3);

  return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
	 && insn_operand_matches (icode, 1, reg2)
	 && insn_operand_matches (icode, 2, reg3);
}

/* Return whether the backend can emit a vec_extract instruction with
   a non-constant index.  */
bool
can_vec_extract_var_idx_p (machine_mode vec_mode, machine_mode extr_mode)
{
  if (!VECTOR_MODE_P (vec_mode))
    return false;

  rtx reg1 = alloca_raw_REG (extr_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 2);

  enum insn_code icode = convert_optab_handler (vec_extract_optab,
						vec_mode, extr_mode);

  const struct insn_data_d *data = &insn_data[icode];
  machine_mode idx_mode = data->operand[2].mode;

  rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3);

  return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
	 && insn_operand_matches (icode, 1, reg2)
	 && insn_operand_matches (icode, 2, reg3);
}
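
/* Illustrative sketch (not part of GCC): the checks above are simple
   predicates for vectorizer-facing callers, e.g. before expanding a
   variable-index element insertion into a V4SImode vector:

     if (can_vec_set_var_idx_p (V4SImode))
       ;  // expand via the vec_set pattern instead of spilling to memory
*/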
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1))
      && can_create_pseudo_p ())
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1))
      && can_create_pseudo_p ())
    y = force_reg (mode, y);

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      opt_scalar_int_mode cmp_mode_iter;
      FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
	{
	  scalar_int_mode cmp_mode = cmp_mode_iter.require ();
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if (CONST_INT_P (size)
	      ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
		 > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (x, y, size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (!can_create_pseudo_p () && (may_trap_p (x) || may_trap_p (y)))
	goto fail;
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  *ptest = test;
	  *pmode = CCmode;
	  return;
	}
      else
	goto fail;
    }

  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_WIDER_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT)
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
	{
	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
			    ptest, pmode);
	  if (*ptest)
	    return;
	}

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.
	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
		 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
	return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
	return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
			  direct_optab cmp_optab, profile_probability prob,
			  bool test_branch)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cmp_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (test_branch || insn_operand_matches (icode, 0, test));
  if (test_branch)
    insn = emit_jump_insn (GEN_FCN (icode) (XEXP (test, 0),
					    XEXP (test, 1), label));
  else
    insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					    XEXP (test, 1), label));

  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* PTEST points to a comparison that compares its first operand with zero.
   Check to see if it can be performed as a bit-test-and-branch instead.
   On success, return the instruction that performs the bit-test-and-branch
   and replace the second operand of *PTEST with the bit number to test.
   On failure, return CODE_FOR_nothing and leave *PTEST unchanged.

   Note that the comparison described by *PTEST should not be taken
   literally after a successful return.  *PTEST is just a convenient
   place to store the two operands of the bit-and-test.

   VAL must contain the original tree expression for the first operand
   of *PTEST.  */

static enum insn_code
validate_test_and_branch (tree val, rtx *ptest, machine_mode *pmode, optab *res)
{
  if (!val || TREE_CODE (val) != SSA_NAME)
    return CODE_FOR_nothing;

  machine_mode mode = TYPE_MODE (TREE_TYPE (val));
  rtx test = *ptest;
  direct_optab optab;

  if (GET_CODE (test) == EQ)
    optab = tbranch_eq_optab;
  else if (GET_CODE (test) == NE)
    optab = tbranch_ne_optab;
  else
    return CODE_FOR_nothing;

  *res = optab;

  /* If the target supports the testbit comparison directly, great.  */
  auto icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return icode;

  if (tree_zero_one_valued_p (val))
    {
      auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 : 0;
      XEXP (test, 1) = gen_int_mode (pos, mode);
      *ptest = test;
      *pmode = mode;
      return icode;
    }

  wide_int wcst = get_nonzero_bits (val);
  if (wcst == -1)
    return CODE_FOR_nothing;

  int bitpos;

  if ((bitpos = wi::exact_log2 (wcst)) == -1)
    return CODE_FOR_nothing;

  auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 - bitpos : bitpos;
  XEXP (test, 1) = gen_int_mode (pos, mode);
  *ptest = test;
  *pmode = mode;
  return icode;
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  If the comparison is against
   zero then VAL contains the expression from which the non-zero RTL is
   derived.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, tree val, rtx label,
			 profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);

  /* Check if we're comparing a truth type with 0, and if so check if
     the target supports tbranch.  */
  machine_mode tmode = mode;
  direct_optab optab;
  if (op1 == CONST0_RTX (GET_MODE (op1))
      && validate_test_and_branch (val, &test, &tmode,
				   &optab) != CODE_FOR_nothing)
    {
      emit_cmp_and_jump_insn_1 (test, tmode, label, optab, prob, true);
      return;
    }

  emit_cmp_and_jump_insn_1 (test, mode, label, cbranch_optab, prob, false);
}
/* Overloaded version of emit_cmp_and_jump_insns in which VAL is unknown.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, rtx label,
			 profile_probability prob)
{
  emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, NULL,
			   label, prob);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_WIDER_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  std::swap (x, y);
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      class expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
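
/* Illustrative sketch (not part of GCC): expanding a computed goto
   typically loads the label address into a Pmode register first;
   LABEL_EXPR here is a hypothetical tree operand:

     rtx addr = copy_to_mode_reg (Pmode, expand_normal (label_expr));
     emit_indirect_jump (addr);
*/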
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, struct rtx_comparison comp,
		       rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (comp.op0, comp.op1))
    {
      std::swap (comp.op0, comp.op1);
      comp.code = swap_condition (comp.code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */

  if (comp.code == LT && comp.op1 == const1_rtx)
    comp.code = LE, comp.op1 = const0_rtx;
  else if (comp.code == GT && comp.op1 == constm1_rtx)
    comp.code = GE, comp.op1 = const0_rtx;

  if (comp.mode == VOIDmode)
    comp.mode = GET_MODE (comp.op0);

  enum rtx_code orig_code = comp.code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed
	   = reversed_comparison_code_parts (comp.code, comp.op0, comp.op1,
					     NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      comp.code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      comp.code = unsignedp ? unsigned_condition (comp.code) : comp.code;
      comparison =
	simplify_gen_relational (comp.code, VOIDmode,
				 comp.mode, comp.op0, comp.op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
	 punt and let the caller figure out how best to deal with this
	 situation.  */
      if (COMPARISON_P (comparison))
	{
	  saved_pending_stack_adjust save;
	  save_pending_stack_adjust (&save);
	  last = get_last_insn ();
	  do_pending_stack_adjust ();
	  machine_mode cmpmode = comp.mode;
	  rtx orig_op0 = XEXP (comparison, 0);
	  rtx orig_op1 = XEXP (comparison, 1);
	  rtx op2p = op2;
	  rtx op3p = op3;
	  /* If we are optimizing, force expensive constants into a register
	     but preserve an eventual equality with op2/op3.  */
	  if (CONSTANT_P (orig_op0) && optimize
	      && (rtx_cost (orig_op0, mode, COMPARE, 0,
			    optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (1))
	      && can_create_pseudo_p ())
	    {
	      if (rtx_equal_p (orig_op0, op2))
		op2p = XEXP (comparison, 0) = force_reg (cmpmode, orig_op0);
	      else if (rtx_equal_p (orig_op0, op3))
		op3p = XEXP (comparison, 0) = force_reg (cmpmode, orig_op0);
	      else
		XEXP (comparison, 0) = force_reg (cmpmode, orig_op0);
	    }
	  if (CONSTANT_P (orig_op1) && optimize
	      && (rtx_cost (orig_op1, mode, COMPARE, 0,
			    optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (1))
	      && can_create_pseudo_p ())
	    {
	      if (rtx_equal_p (orig_op1, op2))
		op2p = XEXP (comparison, 1) = force_reg (cmpmode, orig_op1);
	      else if (rtx_equal_p (orig_op1, op3))
		op3p = XEXP (comparison, 1) = force_reg (cmpmode, orig_op1);
	      else
		XEXP (comparison, 1) = force_reg (cmpmode, orig_op1);
	    }
	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
			    GET_CODE (comparison), NULL_RTX, unsignedp,
			    OPTAB_WIDEN, &comparison, &cmpmode);
	  if (comparison)
	    {
	      rtx res = emit_conditional_move_1 (target, comparison,
						 op2p, op3p, mode);
	      if (res != NULL_RTX)
		return res;
	    }
	  delete_insns_since (last);
	  restore_pending_stack_adjust (&save);
	}

      if (pass == 1)
	return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
	 operand order, perhaps it will expand successfully.  */
      if (swapped)
	comp.code = orig_code;
      else if ((reversed =
		reversed_comparison_code_parts (orig_code, comp.op0, comp.op1,
						NULL))
	       != UNKNOWN)
	comp.code = reversed;
      else
	return NULL_RTX;
      std::swap (op2, op3);
    }
}
/* Helper function that, in addition to COMPARISON, also tries
   the reversed REV_COMPARISON with swapped OP2 and OP3.  As opposed
   to when we pass the specific constituents of a comparison, no
   additional insns are emitted for it.  It might still be necessary
   to emit more than one insn for the final conditional move, though.  */

rtx
emit_conditional_move (rtx target, rtx comparison, rtx rev_comparison,
		       rtx op2, rtx op3, machine_mode mode)
{
  rtx res = emit_conditional_move_1 (target, comparison, op2, op3, mode);

  if (res != NULL_RTX)
    return res;

  return emit_conditional_move_1 (target, rev_comparison, op3, op2, mode);
}
/* Helper for emitting a conditional move.  */

static rtx
emit_conditional_move_1 (rtx target, rtx comparison,
			 rtx op2, rtx op3, machine_mode mode)
{
  enum insn_code icode;

  if (comparison == NULL_RTX || !COMPARISON_P (comparison))
    return NULL_RTX;

  /* If the two source operands are identical, that's just a move.
     As the comparison comes in non-canonicalized, we must make
     sure not to discard any possible side effects.  If there are
     side effects, just let the target handle it.  */
  if (!side_effects_p (comparison) && rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], comparison);
  create_input_operand (&ops[2], op2, mode);
  create_input_operand (&ops[3], op3, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);
      return target;
    }

  return NULL_RTX;
}
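
/* Illustrative sketch (not part of GCC): ifcvt-style callers build an
   rtx_comparison and let emit_conditional_move handle canonicalization
   and the movcc expansion.  All operands below are assumed to be
   SImode rtxes prepared by the caller:

     struct rtx_comparison cmp;
     cmp.code = GT; cmp.op0 = a; cmp.op1 = b; cmp.mode = SImode;
     rtx res = emit_conditional_move (target, cmp, x, y, SImode, 0);
     if (res == NULL_RTX)
       ;  // the target has no usable movsicc pattern
*/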
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
				    machine_mode mode, rtx cond, rtx op1,
				    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

bool
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

bool
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return false;

  return true;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

bool
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return false;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return false;

  return true;
}
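
/* Illustrative sketch (not part of GCC): the gen_* bodies above pair
   with the have_* predicates, e.g. in an expansion path that wants a
   two-operand add only when the target accepts the operands:

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));   // x := x + y
*/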
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && (significand_size (fmode)
		< GET_MODE_UNIT_PRECISION (GET_MODE (from))))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
	{
	  scalar_mode fmode = fmode_iter.require ();
	  if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
	      && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
	    break;
	}

      if (!fmode_iter.exists (&fmode))
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = to_mode;

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (from_mode))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = from_mode;
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
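
/* Illustrative sketch (not part of GCC): expand_expr reaches
   expand_float for FLOAT_EXPR; converting an unsigned SImode value
   FROM into a fresh DFmode register looks like:

     rtx to = gen_reg_rtx (DFmode);
     expand_float (to, from, 1);   // nonzero: treat FROM as unsigned
*/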
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    rtx from1 = from;
	    if (fmode != GET_MODE (from))
	      {
		if (REAL_MODE_FORMAT (GET_MODE (from))
		    == &arm_bfloat_half_format
		    && REAL_MODE_FORMAT (fmode) == &ieee_single_format)
		  /* The BF -> SF conversion can be just a shift, so it
		     doesn't need to handle sNaNs.  */
		  {
		    int save_flag_finite_math_only = flag_finite_math_only;
		    flag_finite_math_only = true;
		    from1 = convert_to_mode (fmode, from, 0);
		    flag_finite_math_only = save_flag_finite_math_only;
		  }
		else
		  from1 = convert_to_mode (fmode, from, 0);
	      }

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from1));
		from1 = expand_unop (GET_MODE (from1), ftrunc_optab, from1,
				     temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from1,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive (as for other input overflow happens and result is undefined).
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
	scalar_mode fmode = fmode_iter.require ();
	if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
					   0, &must_trunc)
	    && (!DECIMAL_FLOAT_MODE_P (fmode)
		|| (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
	  {
	    int bitsize;
	    REAL_VALUE_TYPE offset;
	    rtx limit;
	    rtx_code_label *lab1, *lab2;
	    rtx_insn *insn;

	    bitsize = GET_MODE_PRECISION (to_mode);
	    real_2expN (&offset, bitsize - 1, fmode);
	    limit = const_double_from_real_value (offset, fmode);
	    lab1 = gen_label_rtx ();
	    lab2 = gen_label_rtx ();

	    if (fmode != GET_MODE (from))
	      {
		if (REAL_MODE_FORMAT (GET_MODE (from))
		    == &arm_bfloat_half_format
		    && REAL_MODE_FORMAT (fmode) == &ieee_single_format)
		  /* The BF -> SF conversion can be just a shift, so it
		     doesn't need to handle sNaNs.  */
		  {
		    int save_flag_finite_math_only = flag_finite_math_only;
		    flag_finite_math_only = true;
		    from = convert_to_mode (fmode, from, 0);
		    flag_finite_math_only = save_flag_finite_math_only;
		  }
		else
		  from = convert_to_mode (fmode, from, 0);
	      }

	    /* See if we need to do the subtraction.  */
	    do_pending_stack_adjust ();
	    emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
				     GET_MODE (from), 0, lab1);

	    /* If not, do the signed "fix" and branch around fixup code.  */
	    expand_fix (to, from, 0);
	    emit_jump_insn (targetm.gen_jump (lab2));
	    emit_barrier ();

	    /* Otherwise, subtract 2**(N-1), convert to signed number,
	       then add 2**(N-1).  Do the addition using XOR since this
	       will often generate better code.  */
	    emit_label (lab1);
	    target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				   NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    expand_fix (to, target, 0);
	    target = expand_binop (to_mode, xor_optab, to,
				   gen_int_mode
				   (HOST_WIDE_INT_1 << (bitsize - 1),
				    to_mode),
				   to, 1, OPTAB_LIB_WIDEN);

	    if (target != to)
	      emit_move_insn (to, target);

	    emit_label (lab2);

	    if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
	      {
		/* Make a place for a REG_NOTE and add it.  */
		insn = emit_move_insn (to, to);
		set_dst_reg_note (insn, REG_EQUAL,
				  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
						 copy_rtx (from)),
				  to);
	      }

	    return;
	  }
      }

  if (REAL_MODE_FORMAT (GET_MODE (from)) == &arm_bfloat_half_format
      && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
    /* We don't have BF -> TI library functions, use BF -> SF -> TI
       instead; the BF -> SF conversion can be just a shift, so it
       doesn't need to handle sNaNs.  */
    {
      int save_flag_finite_math_only = flag_finite_math_only;
      flag_finite_math_only = true;
      from = convert_to_mode (SFmode, from, 0);
      flag_finite_math_only = save_flag_finite_math_only;
      expand_fix (to, from, unsignedp);
      return;
    }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
	 it here instead of inside emit_library_call_value because in
	 emit_library_call_value we don't know if we should do a signed or
	 unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
					&unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
	return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	icode = convert_optab_handler (tab, imode, fmode,
				       insn_optimization_type ());
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

bool
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
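
/* Illustrative sketch (not part of GCC): passes use have_insn_for to
   gate RTL they are about to synthesize:

     if (have_insn_for (ROTATE, DImode))
       ;  // safe to emit a DImode rotate without a library call
*/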
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE or UNKNOWN.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;

    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;

    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;

    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      code = UNKNOWN;
      break;
    }
  return code;
}

/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
  gcc_assert (code != UNKNOWN);
  return code;
}
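/* Editorial example: get_rtx_code (LT_EXPR, false) yields LT, while
   get_rtx_code (LT_EXPR, true) yields LTU; equality-style codes such as
   EQ_EXPR do not depend on UNSIGNEDP.  */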
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
		    tree t_op0, tree t_op1, bool unsignedp,
		    enum insn_code icode, unsigned int opno)
{
  class expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand (for left shift
   first operand) is a constant vector of zeros.  Return the shift distance
   in bits if so, or NULL_RTX if the vec_perm is not a shift.  MODE is the
   mode of the value being shifted.  SHIFT_OPTAB is vec_shr_optab for right
   shift or vec_shl_optab for left shift.  */

static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
			     optab shift_optab)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (shift_optab == vec_shl_optab)
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      unsigned firstidx = 0;
      for (unsigned int i = 0; i < nelt; i++)
	{
	  if (known_eq (sel[i], nelt))
	    {
	      if (i == 0 || firstidx)
		return NULL_RTX;
	      firstidx = i;
	    }
	  else if (firstidx
		   ? maybe_ne (sel[i], nelt + i - firstidx)
		   : maybe_ge (sel[i], nelt))
	    return NULL_RTX;
	}

      if (firstidx == 0)
	return NULL_RTX;
      first = firstidx;
    }
  else if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
	{
	  poly_int64 expected = i + first;
	  /* Indices into the second vector are all equivalent.  */
	  if (maybe_lt (sel[i], nelt)
	      ? maybe_ne (sel[i], expected)
	      : maybe_lt (expected, nelt))
	    return NULL_RTX;
	}
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
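/* Worked example (editorial): for V4SImode with the second vec_perm
   operand all zeros, the selector {1, 2, 3, 4} takes elements 1..3 of
   the first vector followed by one zero element, i.e. a whole-vector
   shift right by one 32-bit element, so the function returns a 32-bit
   shift amount.  A non-consecutive selector such as {1, 3, 5, 7} fails
   the series check and yields NULL_RTX.  */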
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  class expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
	      || related_int_vector_mode (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
		       const vec_perm_builder &sel, machine_mode sel_mode,
		       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr or vec_shl.  We only do this
     if the second (for vec_shr) or first (for vec_shl) vector is all
     zeroes.  */
  insn_code shift_code = CODE_FOR_nothing;
  insn_code shift_code_qi = CODE_FOR_nothing;
  optab shift_optab = unknown_optab;
  rtx v2 = v0;
  if (v1 == CONST0_RTX (GET_MODE (v1)))
    shift_optab = vec_shr_optab;
  else if (v0 == CONST0_RTX (GET_MODE (v0)))
    {
      shift_optab = vec_shl_optab;
      v2 = v1;
    }
  if (shift_optab != unknown_optab)
    {
      shift_code = optab_handler (shift_optab, mode);
      shift_code_qi = ((qimode != VOIDmode && qimode != mode)
		       ? optab_handler (shift_optab, qimode)
		       : CODE_FOR_nothing);
    }

  if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
      if (shift_amt)
	{
	  class expand_operand ops[3];
	  if (shift_amt == const0_rtx)
	    return v2;
	  if (shift_code != CODE_FOR_nothing)
	    {
	      create_output_operand (&ops[0], target, mode);
	      create_input_operand (&ops[1], v2, mode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code, 3, ops))
		return ops[0].value;
	    }
	  if (shift_code_qi != CODE_FOR_nothing)
	    {
	      rtx tmp = gen_reg_rtx (qimode);
	      create_output_operand (&ops[0], tmp, qimode);
	      create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code_qi, 3, ops))
		return gen_lowpart (mode, ops[0].value);
	    }
	}
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      if (single_arg_p)
	v1 = v0;

      gcc_checking_assert (GET_MODE (v0) == GET_MODE (v1));
      machine_mode op_mode = GET_MODE (v0);
      if (targetm.vectorize.vec_perm_const (mode, op_mode, target, v0, v1,
					    indices))
	return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
	  && targetm.vectorize.vec_perm_const (qimode, qimode, target_qi, v0_qi,
					       v1_qi, qimode_indices))
	return gen_lowpart (mode, target_qi);
    }

  v0 = force_reg (mode, v0);
  if (single_arg_p)
    v1 = v0;
  v1 = force_reg (mode, v1);

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!related_int_vector_mode (mode).exists (&required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
	return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
	{
	  rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
	  rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
	  if (tmp)
	    return gen_lowpart (mode, tmp);
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
			       NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
			       gen_int_shift_amount (selmode, exact_log2 (u)),
			       NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
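/* Worked example (editorial): lowering a V4SImode permutation (U == 4)
   of element index 2 proceeds as follows: scale by the element size,
   2 << log2 (4) == 8; broadcast that low byte across the element's four
   selector bytes, giving {8, 8, 8, 8}; then add the byte offsets
   {0, 1, 2, 3}, giving the QImode selector bytes {8, 9, 10, 11}, which
   copy the four bytes of element 2 in memory order.  */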
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  class expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  class expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
				   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  class expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
				 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
	sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
			+ ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
	sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
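/* Worked example (editorial): with method 2 on a little-endian target
   and V4SImode, the even multiply leaves {p0, p2} and the odd multiply
   {p1, p3} as V2DImode products; reinterpreted as V4SImode these are
   m1 = {lo0, hi0, lo2, hi2} and m2 = {lo1, hi1, lo3, hi3}, and the
   interleaved selector built above, {1, 5, 3, 7}, picks out
   {hi0, hi1, hi2, hi3}, the high halves in element order.  */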
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
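/* Editorial sketch (not part of the original source): at the C level the
   generated sequence behaves like

     T cur = *mem;
     T desired;
     do
       desired = SEQ (cur);
     while (!__atomic_compare_exchange_n (mem, &cur, desired, false,
					  __ATOMIC_SEQ_CST,
					  __ATOMIC_RELAXED));

   where SEQ stands for the caller-supplied instruction sequence and the
   builtin writes the observed memory value back into CUR on failure,
   just as OLDVAL is copied back into CMP_REG above.  */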
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  class expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    {
      ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx,
					       model);
      if (ret)
	{
	  /* Rectify the not-one trueval.  */
	  ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
	  gcc_assert (ret);
	}
    }

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (get_last_insn (), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
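/* Editorial sketch of the source-level mapping: for

     bool ok = __atomic_compare_exchange_n (&x, &expected, desired, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED);

   PTARGET_BOOL receives `ok', PTARGET_OVAL receives the value observed
   in memory (which the builtin's expander stores back into `expected'),
   IS_WEAK is the fourth argument, and SUCC_MODEL/FAIL_MODEL are the
   last two.  */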
/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}

/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}
/* Generate asm volatile("" : : : "memory") as a memory blockage, at the
   same time clobbering the register set specified by REGS.  */

void
expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
{
  rtx asm_op, clob_mem;

  unsigned int num_of_regs = 0;
  for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (regs, i))
      num_of_regs++;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  rtvec v = rtvec_alloc (num_of_regs + 2);

  clob_mem = gen_rtx_SCRATCH (VOIDmode);
  clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
  clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);

  RTVEC_ELT (v, 0) = asm_op;
  RTVEC_ELT (v, 1) = clob_mem;

  if (num_of_regs > 0)
    {
      unsigned int j = 2;
      for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (regs, i))
	  {
	    RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
	    j++;
	  }
      gcc_assert (j == (num_of_regs + 2));
    }

  emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
}
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}

/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}
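/* For reference (editorial note): on targets without mem_thread_fence
   or memory_barrier patterns the thread fence degrades to a
   synchronize libcall or, failing that, a compiler-only blockage.
   A signal fence never needs a machine barrier; it is equivalent to

     __asm__ volatile ("" : : : "memory");

   which is exactly what expand_asm_memory_blockage emits.  */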
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
	expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (!is_mm_relaxed (model))
	    expand_memory_blockage ();
	  return ops[0].value;
	}
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     doing this could result in would be incorrect if this is a volatile
     atomic load or targeting read-only-mapped memory.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  class expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
	expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (is_mm_seq_cst (model))
	    expand_memory_blockage ();
	  return const0_rtx;
	}
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
	 we can try an atomic_exchange and throw away the result.  Otherwise,
	 don't do anything so that we do not create an inconsistency between
	 loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
	{
	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
	  if (!target)
	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
								val);
	  if (target)
	    return const0_rtx;
	}
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
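/* Editorial example: __atomic_fetch_and (&x, 0, model) stores 0
   unconditionally and returns the previous value, which is exactly an
   atomic exchange with 0; __atomic_fetch_or (&x, -1, model) is likewise
   an exchange with all-ones.  The AFTER (OP_fetch) forms only qualify
   when the result is unused, because the exchange returns the old value
   while x & 0 and x | -1 are already known constants.  */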
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
	return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
	return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model,
				!after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after == fetch_before OP val.
	     Fetch_before == fetch_after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
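/* Editorial example of the compensation arithmetic: if only fetch_add
   is available but add_fetch was requested (AFTER is true), the
   expander emits the fetch_add and then adds VAL to the result, since
   add_fetch == fetch_add + VAL.  Conversely a fetch_add can be
   recovered from an add_fetch using the reverse code MINUS:
   fetch_add == add_fetch - VAL.  NAND has reverse_code UNKNOWN because
   the before-value cannot be recovered from the after-value, which is
   why the NOT case above instead recomputes the after-value as
   NOT (result AND VAL).  */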
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  tmp = get_insns ();
	  end_sequence ();
	  emit_insn (tmp);
	  return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    addr, ptr_mode, val, mode);

	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;
  unsigned int size;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (class expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
			 gen_int_mode (intval, MAX_MODE_INT),
			 VOIDmode, false, intval);
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    class expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx mem = op->value;
      rtx addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
	  && !side_effects_p (addr))
	{
	  rtx_insn *last = get_last_insn ();
	  machine_mode mode = get_address_mode (mem);
	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
	  if (insn_operand_matches (icode, opno, mem))
	    {
	      op->value = mem;
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  class expand_operand *op)
{
  machine_mode mode, imode, tmode;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      {
	temporary_volatile_ok v (true);
	return maybe_legitimize_operand_same_code (icode, opno, op);
      }

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
	       ? GET_MODE_INNER (imode) : imode);
      if (tmode != VOIDmode && tmode != mode)
	{
	  op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
	  mode = tmode;
	}
      if (imode != VOIDmode && imode != mode)
	{
	  gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
	  op->value = expand_vector_broadcast (imode, op->value);
	  mode = imode;
	}
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
					  op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
	  && known_eq (trunc_int_for_mode (op->int_value, mode),
		       op->int_value))
	{
	  op->value = gen_int_mode (op->int_value, mode);
	  goto input;
	}
      break;

    case EXPAND_UNDEFINED_INPUT:
      /* See if the predicate accepts a SCRATCH rtx, which in this context
	 indicates an undefined value.  Use an uninitialized register if not.  */
      if (!insn_operand_matches (icode, opno, op->value))
	{
	  op->value = gen_reg_rtx (op->mode);
	  goto input;
	}
      return true;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (class expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}

/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static inline bool
can_reuse_operands_p (enum insn_code icode,
                      unsigned int opno1, unsigned int opno2,
                      const class expand_operand *op1,
                      const class expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
          != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
    case EXPAND_UNDEFINED_INPUT:
      /* Outputs and undefined inputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}
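
/* Illustrative sketch, not part of the original file: the point of the
   reuse check is that when the same rtx is passed for two ordinary
   inputs (say an FMA computing X * X + Y), both operands end up
   sharing a single legitimized copy rather than being legitimized
   twice.  TARGET, X, Y and MODE are assumptions for the example.  */
#if 0
  class expand_operand ops[4];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], x, mode);  /* legitimized once...  */
  create_input_operand (&ops[2], x, mode);  /* ...and reused here.  */
  create_input_operand (&ops[3], y, mode);
#endif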

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, class expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
         This avoids duplicate rtl and ensures that tied operands
         remain tied.

         This search is linear, but NOPS is bounded at compile time
         to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
        if (can_reuse_operands_p (icode, opno + j, opno + i,
                                  &ops[j], &ops[i])
            && rtx_equal_p (orig_values[j], orig_values[i])
            && ops[j].value
            && insn_operand_matches (icode, opno + i, ops[j].value))
          {
            ops[i].value = copy_rtx (ops[j].value);
            break;
          }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
        {
          delete_insns_since (last);
          return false;
        }
    }
  return true;
}
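
/* Illustrative sketch, not part of the original file: because the
   function above deletes everything back to LAST on failure, a caller
   can probe one icode and fall back to another strategy without
   leaving stray operand set-up instructions behind.  ICODE and OPS
   are assumed to be set up by the caller.  */
#if 0
  if (icode != CODE_FOR_nothing
      && maybe_legitimize_operands (icode, 0, 3, ops))
    emit_insn (GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value));
  else
    {
      /* Nothing from the failed attempt remains in the instruction
         stream here, so a libcall or open-coded fallback is safe.  */
    }
#endif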

/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                class expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 0:
      return GEN_FCN (icode) ();
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    case 10:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value,
                              ops[9].value);
    case 11:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value,
                              ops[9].value, ops[10].value);
    }
  gcc_unreachable ();
}
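
/* Illustrative sketch, not part of the original file: maybe_gen_insn
   is useful when the caller wants the pattern itself before anything
   is emitted for it (only operand set-up code has been emitted at
   that point), e.g. to decide how to emit it.  ICODE and OPS are
   assumed to be set up by the caller.  */
#if 0
  rtx_insn *pat = maybe_gen_insn (icode, 2, ops);
  if (pat)
    {
      emit_insn (pat);
      return ops[0].value;
    }
  return NULL_RTX;
#endif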

/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
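
/* Illustrative sketch, not part of the original file: the typical
   expansion pattern for a two-input operation.  ICODE would come from
   e.g. optab_handler; the function and variable names here are
   assumptions for the example.  */
#if 0
static rtx
example_expand_binop_insn (enum insn_code icode, rtx target,
                           rtx op0, rtx op1, machine_mode mode)
{
  class expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], op0, mode);
  create_input_operand (&ops[2], op1, mode);
  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;        /* may differ from TARGET  */
  return NULL_RTX;              /* failure: no code was emitted  */
}
#endif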

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
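
/* Illustrative sketch, not part of the original file, loosely modelled
   on how cbranch-style patterns are driven: the comparison, its two
   operands and the label are passed as fixed operands, and the jump
   variant makes the emitted pattern a JUMP_INSN.  TEST, ICODE and
   LABEL are assumptions for the example.  */
#if 0
  class expand_operand ops[4];
  create_fixed_operand (&ops[0], test);           /* the comparison rtx  */
  create_fixed_operand (&ops[1], XEXP (test, 0));
  create_fixed_operand (&ops[2], XEXP (test, 1));
  create_fixed_operand (&ops[3], label);
  if (!maybe_expand_jump_insn (icode, 4, ops))
    gcc_unreachable ();         /* or fall back to another sequence  */
#endif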

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             class expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}
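
/* Illustrative sketch, not part of the original file: expand_insn is
   for callers that already know the pattern exists, e.g. after
   checking an optab handler, so failure is a compiler bug.
   SOME_OPTAB, MODE and OPS are placeholders for the example.  */
#if 0
  enum insn_code icode = direct_optab_handler (some_optab, mode);
  gcc_assert (icode != CODE_FOR_nothing);
  expand_insn (icode, 3, ops);
#endif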

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  class expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}