/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
#include "internal-fn.h"
#include "langhooks.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    {
      if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
	{
	  note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
	  if (GET_MODE_UNIT_SIZE (op0_mode)
	      > GET_MODE_UNIT_SIZE (GET_MODE (target)))
	    note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
				       note, op0_mode);
	  else
	    note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
				       note, op0_mode);
	}
      else
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
    }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0),
			   copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
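/* Editor's note (illustration, not from the original sources): when a
   DImode addition on a 32-bit target is expanded into a multi-insn
   word-mode sequence, the note attached above records the operation as a
   whole, e.g. REG_EQUAL (plus:DI (reg) (reg)), so that later CSE and
   combine passes can still see the full computation.  */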
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
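/* Editor's illustration of NO_EXTEND: when an AND is widened from QImode
   to a wider mode, the extra high bits of the operands never survive the
   final truncation back to QImode, so a paradoxical lowpart SUBREG (or,
   for multiword modes, a clobbered register whose low part alone is set)
   is sufficient.  A right shift, by contrast, pulls high bits down into
   the result, so there NO_EXTEND must be zero and a real extension is
   emitted.  */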
/* Expand vector widening operations.

   There are two different classes of operations handled here:

   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
				nops	OP0	OP1	WIDE_OP
   widening-sum			2	oprnd0	-	oprnd1
   widening-dot-product		3	oprnd0	oprnd1	oprnd2
   widening-mult		2	oprnd0	oprnd1	-
   type-promotion (vec-unpack)	1	oprnd0	-	-  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  oprnd1 = NULL_TREE;
  oprnd2 = NULL_TREE;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
	    || ops->code == VEC_UNPACK_LO_EXPR)
	   && VECTOR_BOOLEAN_TYPE_P (ops->type)
	   && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
	   && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
	   && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
	 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
	 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
	 the pattern number of elements in the wider vector.  */
      widen_pattern_optab
	= (ops->code == VEC_UNPACK_HI_EXPR
	   ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
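/* Editor's note: the three strategies above, in order, are (1) a constant
   element folds directly to a CONST_VECTOR, (2) a variable element uses a
   target-provided vec_duplicate pattern when one exists, and (3) a generic
   fallback builds a vec_init PARALLEL of N identical elements.  */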
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
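/* Editor's illustration (assuming a 32-bit word_mode and a left shift of
   a 64-bit value, so OUTOF_* is the low word and INTO_* the high word):
   for an effective count C >= 32 the superword case computes
	into_target = outof_input << (C - 32);	outof_target = 0;
   while for C < 32 the subword case computes
	into_target = (into_input << C) | (outof_input >> (32 - C));
	outof_target = outof_input << C;
   The code below selects between the two at compile time when possible,
   otherwise with conditional moves or, as a last resort, a branch.  */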
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   NULL_RTX, NULL, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
        *		      [__op1_high_|__op1_low__]
        _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
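/* Editor's worked example (hypothetical 4-bit words, 8-bit doublewords):
   let op0_low = 0xf (15 unsigned, -1 signed) and op1_low = 0x3.  The
   unsigned widening product (1) is 45 = 0x2d, but a signed widening
   multiply yields (-1) * 3 = -3 = 0xfd.  The trick adds the sign bit
   op0_low >> 3 = 1 to op0_high before partial product (2b) is formed,
   contributing op1_low << 4 = 0x30 to the final sum:
   0xfd + 0x30 = 0x12d, which truncated to 8 bits is 0x2d, as required.  */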
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Subroutine of expand_binop.  Optimize unsigned double-word OP0 % OP1 for
   constant OP1.  If for some bit in [BITS_PER_WORD / 2, BITS_PER_WORD] range
   (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
   computed in word-mode as ((OP0 & ((1w << bit) - 1))
   + ((OP0 >> bit) & ((1w << bit) - 1)) + (OP0 >> (2 * bit))) % OP1.
   Whether we need to sum 2, 3 or 4 values depends on the bit value; if 2,
   then carry from the addition needs to be added too, i.e. like:
     sum += __builtin_add_overflow (low, high, &sum)

   Optimize signed double-word OP0 % OP1 similarly, just apply some correction
   factor to the sum before doing unsigned remainder, in the form of
     sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
   then perform unsigned
     remainder = sum % OP1;
   and finally
     remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1);  */
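/* Editor's example (not from the original sources): with
   BITS_PER_WORD == 32 and OP1 == 3, bit == 32 qualifies because
   (1w << 32) % 3 == 1.  Splitting a 64-bit OP0 into 32-bit halves LOW and
   HIGH gives OP0 % 3 == (LOW + HIGH + carry) % 3, where carry is the
   carry-out of the 32-bit addition LOW + HIGH, so the whole remainder is
   computed with word-mode operations only.  */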
static rtx
expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
{
  if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
    return NULL_RTX;

  rtx_insn *last = get_last_insn ();
  for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
    {
      wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
      if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
	continue;
      rtx sum = NULL_RTX, mask = NULL_RTX;
      if (bit == BITS_PER_WORD)
	{
	  /* For signed modulo we need to add correction to the sum
	     and that might again overflow.  */
	  if (!unsignedp)
	    continue;
	  if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
	    continue;
	  tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
	  if (wtype == NULL_TREE)
	    continue;
	  tree ctype = build_complex_type (wtype);
	  if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
	    continue;
	  machine_mode cmode = TYPE_MODE (ctype);
	  rtx op00 = operand_subword_force (op0, 0, mode);
	  rtx op01 = operand_subword_force (op0, 1, mode);
	  rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
				     gen_reg_rtx (word_mode));
	  tree lhs = make_tree (ctype, cres);
	  tree arg0 = make_tree (wtype, op00);
	  tree arg1 = make_tree (wtype, op01);
	  expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
				  arg1, true, true, true, false, NULL);
	  sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
				     XEXP (cres, 1), NULL_RTX, 1,
				     OPTAB_DIRECT);
	  if (sum == NULL_RTX)
	    return NULL_RTX;
	}
      else
	{
	  /* Code below uses GEN_INT, so we need the masks to be representable
	     in HOST_WIDE_INTs.  */
	  if (bit >= HOST_BITS_PER_WIDE_INT)
	    continue;
	  /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
	     overflow.  Consider 64-bit -1ULL for word size 32, if we add
	     0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1.  */
	  if (bit == BITS_PER_WORD - 1)
	    continue;

	  int count = (2 * BITS_PER_WORD + bit - 1) / bit;
	  rtx sum_corr = NULL_RTX;

	  if (!unsignedp)
	    {
	      /* For signed modulo, compute it as unsigned modulo of
		 sum with a correction added to it if OP0 is negative,
		 such that the result can be computed as unsigned
		 remainder + ((OP1 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1).  */
	      w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
	      wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
	      wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
	      /* wmod2 == -wmod1.  */
	      wmod2 = wmod2 + (INTVAL (op1) - 1);
	      if (wi::ne_p (wmod1, wmod2))
		{
		  wide_int wcorr = wmod2 - wmod1;
		  if (wi::neg_p (wcorr))
		    wcorr = wcorr + INTVAL (op1);
		  /* Now verify if the count sums can't overflow, and punt
		     if they could.  */
		  w = wi::mask (bit, false, 2 * BITS_PER_WORD);
		  w = w * (count - 1);
		  w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
				    false, 2 * BITS_PER_WORD);
		  w = w + wcorr;
		  w = wi::lrshift (w, BITS_PER_WORD);
		  if (wi::ne_p (w, 0))
		    continue;

		  mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
						mode);
		  mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					      GEN_INT (BITS_PER_WORD - 1),
					      NULL_RTX, 0, OPTAB_DIRECT);
		  if (mask == NULL_RTX)
		    return NULL_RTX;
		  sum_corr = immed_wide_int_const (wcorr, word_mode);
		  sum_corr = expand_simple_binop (word_mode, AND, mask,
						  sum_corr, NULL_RTX, 1,
						  OPTAB_DIRECT);
		  if (sum_corr == NULL_RTX)
		    return NULL_RTX;
		}
	    }

	  for (int i = 0; i < count; i++)
	    {
	      rtx v = op0;
	      if (i)
		v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      v = lowpart_subreg (word_mode, v, mode);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (i != count - 1)
		v = expand_simple_binop (word_mode, AND, v,
					 GEN_INT ((HOST_WIDE_INT_1U << bit)
						  - 1), NULL_RTX, 1,
					 OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (sum == NULL_RTX)
		sum = v;
	      else
		sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
					   1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	  if (sum_corr)
	    {
	      sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	}
      rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum,
				     gen_int_mode (INTVAL (op1), word_mode),
				     NULL_RTX, 1, OPTAB_DIRECT);
      if (remainder == NULL_RTX)
	return NULL_RTX;

      if (!unsignedp)
	{
	  if (mask == NULL_RTX)
	    {
	      mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
					    mode);
	      mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					  GEN_INT (BITS_PER_WORD - 1),
					  NULL_RTX, 0, OPTAB_DIRECT);
	      if (mask == NULL_RTX)
		return NULL_RTX;
	    }
	  mask = expand_simple_binop (word_mode, AND, mask,
				      gen_int_mode (1 - INTVAL (op1),
						    word_mode),
				      NULL_RTX, 1, OPTAB_DIRECT);
	  if (mask == NULL_RTX)
	    return NULL_RTX;
	  remainder = expand_simple_binop (word_mode, PLUS, remainder,
					   mask, NULL_RTX, 1, OPTAB_DIRECT);
	  if (remainder == NULL_RTX)
	    return NULL_RTX;
	}

      remainder = convert_modes (mode, word_mode, remainder, unsignedp);
      /* Punt if we need any library calls.  */
      for (; last; last = NEXT_INSN (last))
	if (CALL_P (last))
	  return NULL_RTX;
      return remainder;
    }
  return NULL_RTX;
}
/* Similarly to the above function, but compute both quotient and remainder.
   Quotient can be computed from the remainder as:
	rem = op0 % op1;	 // Handled using expand_doubleword_mod
	quot = (op0 - rem) * inv; // inv is multiplicative inverse
				  // of op1 modulo 2^(2*BITS_PER_WORD)

   We can also handle cases where op1 is a multiple of power of two constant
   and constant handled by expand_doubleword_mod.
	op11 = 1 << __builtin_ctz (op1);
	op12 = op1 / op11;
	rem1 = op0 % op12;	   // Handled using expand_doubleword_mod
	quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse
				    // of op12 modulo 2^(2*BITS_PER_WORD)
	rem = (quot1 % op11) * op12 + rem1;
	quot = quot1 / op11;  */
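/* Editor's example (hypothetical 4-bit words, so arithmetic modulo 2^8):
   for op1 == 3 the multiplicative inverse is 171, since
   3 * 171 == 513 == 2 * 256 + 1.  With op0 == 10, rem == 1 and
   quot == ((10 - 1) * 171) % 256 == 1539 % 256 == 3 == 10 / 3.  */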
static rtx
expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
			  bool unsignedp)
{
  *rem = NULL_RTX;

  /* Negative dividend should have been optimized into positive,
     similarly modulo by 1 and modulo by power of two is optimized
     differently too.  */
  if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
    return NULL_RTX;

  rtx op11 = const1_rtx;
  rtx op12 = op1;
  if ((INTVAL (op1) & 1) == 0)
    {
      int bit = ctz_hwi (INTVAL (op1));
      op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
      op12 = GEN_INT (INTVAL (op1) >> bit);
    }

  rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
  if (rem1 == NULL_RTX)
    return NULL_RTX;

  int prec = 2 * BITS_PER_WORD;
  wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
  wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
  wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
  rtx inv = immed_wide_int_const (m, mode);

  rtx_insn *last = get_last_insn ();
  rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
				   NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  quot1 = expand_simple_binop (mode, MULT, quot1, inv,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  if (op11 != const1_rtx)
    {
      rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11,
				NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11,
				 NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (quot2 == NULL_RTX)
	return NULL_RTX;

      rem1 = rem2;
      quot1 = quot2;
    }

  /* Punt if we need any library calls.  */
  for (; last; last = NEXT_INSN (last))
    if (CALL_P (last))
      return NULL_RTX;

  *rem = rem1;
  return quot1;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SSHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value, mode0))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
	{
	  machine_mode from_mode = widened_mode (mode, op0, op1);
	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
	}
      else
	icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
					target, unsignedp, methods, last);
	  if (temp)
	    return temp;
	}
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
	 one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
	op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab
	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || reg_overlap_mentioned_p (target, op0)
	  || reg_overlap_mentioned_p (target, op1)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
	op0_mode = int_mode;
      if (op1_mode == VOIDmode)
	op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, op0_mode),
				operand_subword_force (op1, i, op1_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || reg_overlap_mentioned_p (target, op0)
	      || reg_overlap_mentioned_p (target, op1)
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, int_mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

	  outof_input = operand_subword_force (op0, outof_word, int_mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();
	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || reg_overlap_mentioned_p (target, op0)
	  || reg_overlap_mentioned_p (target, op1)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  HOST_WIDE_INT first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = shift_count - BITS_PER_WORD;
	      second_shift_count = 2 * BITS_PER_WORD - shift_count;
	    }
	  else
	    {
	      first_shift_count = BITS_PER_WORD - shift_count;
	      second_shift_count = shift_count;
	    }
	  rtx first_shift_count_rtx
	    = gen_int_shift_amount (word_mode, first_shift_count);
	  rtx second_shift_count_rtx
	    = gen_int_shift_amount (word_mode, second_shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
	  rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1966 rtx op1_piece
= operand_subword_force (xop1
, index
, int_mode
);
1969 /* Main add/subtract of the input operands. */
1970 x
= expand_binop (word_mode
, binoptab
,
1971 op0_piece
, op1_piece
,
1972 target_piece
, unsignedp
, next_methods
);
1978 /* Store carry from main add/subtract. */
1979 carry_out
= gen_reg_rtx (word_mode
);
1980 carry_out
= emit_store_flag_force (carry_out
,
1981 (binoptab
== add_optab
1984 word_mode
, 1, normalizep
);
1991 /* Add/subtract previous carry to main result. */
1992 newx
= expand_binop (word_mode
,
1993 normalizep
== 1 ? binoptab
: otheroptab
,
1995 NULL_RTX
, 1, next_methods
);
1999 /* Get out carry from adding/subtracting carry in. */
2000 rtx carry_tmp
= gen_reg_rtx (word_mode
);
2001 carry_tmp
= emit_store_flag_force (carry_tmp
,
2002 (binoptab
== add_optab
2005 word_mode
, 1, normalizep
);
2007 /* Logical-ior the two poss. carry together. */
2008 carry_out
= expand_binop (word_mode
, ior_optab
,
2009 carry_out
, carry_tmp
,
2010 carry_out
, 0, next_methods
);
2014 emit_move_insn (target_piece
, newx
);
2018 if (x
!= target_piece
)
2019 emit_move_insn (target_piece
, x
);
2022 carry_in
= carry_out
;
2025 if (i
== GET_MODE_BITSIZE (int_mode
) / (unsigned) BITS_PER_WORD
)
2027 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
2028 || ! rtx_equal_p (target
, xtarget
))
2030 rtx_insn
*temp
= emit_move_insn (target
, xtarget
);
2032 set_dst_reg_note (temp
, REG_EQUAL
,
2033 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2034 int_mode
, copy_rtx (xop0
),
2045 delete_insns_since (last
);
2048 /* Attempt to synthesize double word multiplies using a sequence of word
2049 mode multiplications. We first attempt to generate a sequence using a
2050 more efficient unsigned widening multiply, and if that fails we then
2051 try using a signed widening multiply. */
2053 if (binoptab
== smul_optab
2054 && is_int_mode (mode
, &int_mode
)
2055 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
2056 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
2057 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
2059 rtx product
= NULL_RTX
;
2060 if (convert_optab_handler (umul_widen_optab
, int_mode
, word_mode
)
2061 != CODE_FOR_nothing
)
2063 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
2066 delete_insns_since (last
);
2069 if (product
== NULL_RTX
2070 && (convert_optab_handler (smul_widen_optab
, int_mode
, word_mode
)
2071 != CODE_FOR_nothing
))
2073 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
2076 delete_insns_since (last
);
2079 if (product
!= NULL_RTX
)
2081 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
)
2083 rtx_insn
*move
= emit_move_insn (target
? target
: product
,
2085 set_dst_reg_note (move
,
2087 gen_rtx_fmt_ee (MULT
, int_mode
,
2090 target
? target
: product
);
2096 /* Attempt to synthetize double word modulo by constant divisor. */
2097 if ((binoptab
== umod_optab
2098 || binoptab
== smod_optab
2099 || binoptab
== udiv_optab
2100 || binoptab
== sdiv_optab
)
2102 && CONST_INT_P (op1
)
2103 && is_int_mode (mode
, &int_mode
)
2104 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
2105 && optab_handler ((binoptab
== umod_optab
|| binoptab
== udiv_optab
)
2106 ? udivmod_optab
: sdivmod_optab
,
2107 int_mode
) == CODE_FOR_nothing
2108 && optab_handler (and_optab
, word_mode
) != CODE_FOR_nothing
2109 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
2110 && optimize_insn_for_speed_p ())
2113 if ((binoptab
== umod_optab
|| binoptab
== smod_optab
)
2114 && (INTVAL (op1
) & 1) == 0)
2115 res
= expand_doubleword_mod (int_mode
, op0
, op1
,
2116 binoptab
== umod_optab
);
2119 rtx quot
= expand_doubleword_divmod (int_mode
, op0
, op1
, &res
,
2120 binoptab
== umod_optab
2121 || binoptab
== udiv_optab
);
2122 if (quot
== NULL_RTX
)
2124 else if (binoptab
== udiv_optab
|| binoptab
== sdiv_optab
)
2127 if (res
!= NULL_RTX
)
2129 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
)
2131 rtx_insn
*move
= emit_move_insn (target
? target
: res
,
2133 set_dst_reg_note (move
, REG_EQUAL
,
2134 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2135 int_mode
, copy_rtx (op0
), op1
),
2136 target
? target
: res
);
2141 delete_insns_since (last
);
2144 /* It can't be open-coded in this mode.
2145 Use a library call if one is available and caller says that's ok. */
2147 libfunc
= optab_libfunc (binoptab
, mode
);
2149 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
2153 machine_mode op1_mode
= mode
;
2158 if (shift_optab_p (binoptab
))
2160 op1_mode
= targetm
.libgcc_shift_count_mode ();
2161 /* Specify unsigned here,
2162 since negative shift counts are meaningless. */
2163 op1x
= convert_to_mode (op1_mode
, op1
, 1);
2166 if (GET_MODE (op0
) != VOIDmode
2167 && GET_MODE (op0
) != mode
)
2168 op0
= convert_to_mode (mode
, op0
, unsignedp
);
2170 /* Pass 1 for NO_QUEUE so we don't lose any increments
2171 if the libcall is cse'd or moved. */
2172 value
= emit_library_call_value (libfunc
,
2173 NULL_RTX
, LCT_CONST
, mode
,
2174 op0
, mode
, op1x
, op1_mode
);
2176 insns
= get_insns ();
2179 bool trapv
= trapv_binoptab_p (binoptab
);
2180 target
= gen_reg_rtx (mode
);
2181 emit_libcall_block_1 (insns
, target
, value
,
2183 : gen_rtx_fmt_ee (optab_to_code (binoptab
),
2184 mode
, op0
, op1
), trapv
);
2189 delete_insns_since (last
);
2191 /* It can't be done in this mode. Can we do it in a wider mode? */
2193 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
2194 || methods
== OPTAB_MUST_WIDEN
))
2196 /* Caller says, don't even try. */
2197 delete_insns_since (entry_last
);
2201 /* Compute the value of METHODS to pass to recursive calls.
2202 Don't allow widening to be tried recursively. */
2204 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
2206 /* Look for a wider mode of the same class for which it appears we can do
2209 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2211 /* This code doesn't make sense for conversion optabs, since we
2212 wouldn't then want to extend the operands to be the same size
2214 gcc_assert (!convert_optab_p (binoptab
));
2215 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2217 if (optab_handler (binoptab
, wider_mode
)
2218 || (methods
== OPTAB_LIB
2219 && optab_libfunc (binoptab
, wider_mode
)))
2221 rtx xop0
= op0
, xop1
= op1
;
2224 /* For certain integer operations, we need not actually extend
2225 the narrow operands, as long as we will truncate
2226 the results to the same narrowness. */
2228 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2229 || binoptab
== xor_optab
2230 || binoptab
== add_optab
|| binoptab
== sub_optab
2231 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2232 && mclass
== MODE_INT
)
2235 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2236 unsignedp
, no_extend
);
2238 /* The second operand of a shift must always be extended. */
2239 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2240 no_extend
&& binoptab
!= ashl_optab
);
2242 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2243 unsignedp
, methods
);
2246 if (mclass
!= MODE_INT
2247 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2250 target
= gen_reg_rtx (mode
);
2251 convert_move (target
, temp
, 0);
2255 return gen_lowpart (mode
, temp
);
2258 delete_insns_since (last
);
2263 delete_insns_since (entry_last
);
2267 /* Expand a binary operator which has both signed and unsigned forms.
2268 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2271 If we widen unsigned operands, we may use a signed wider operation instead
2272 of an unsigned wider operation, since the result would be the same. */
2275 sign_expand_binop (machine_mode mode
, optab uoptab
, optab soptab
,
2276 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2277 enum optab_methods methods
)
2280 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2283 /* Do it without widening, if possible. */
2284 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2285 unsignedp
, OPTAB_DIRECT
);
2286 if (temp
|| methods
== OPTAB_DIRECT
)
2289 /* Try widening to a signed int. Disable any direct use of any
2290 signed insn in the current mode. */
2291 save_enable
= swap_optab_enable (soptab
, mode
, false);
2293 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
2294 unsignedp
, OPTAB_WIDEN
);
2296 /* For unsigned operands, try widening to an unsigned int. */
2297 if (!temp
&& unsignedp
)
2298 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2299 unsignedp
, OPTAB_WIDEN
);
2300 if (temp
|| methods
== OPTAB_WIDEN
)
2303 /* Use the right width libcall if that exists. */
2304 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2305 unsignedp
, OPTAB_LIB
);
2306 if (temp
|| methods
== OPTAB_LIB
)
2309 /* Must widen and use a libcall, use either signed or unsigned. */
2310 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
2311 unsignedp
, methods
);
2312 if (!temp
&& unsignedp
)
2313 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2314 unsignedp
, methods
);
2317 /* Undo the fiddling above. */
2319 swap_optab_enable (soptab
, mode
, true);
2323 /* Generate code to perform an operation specified by UNOPPTAB
2324 on operand OP0, with two results to TARG0 and TARG1.
2325 We assume that the order of the operands for the instruction
2326 is TARG0, TARG1, OP0.
2328 Either TARG0 or TARG1 may be zero, but what that means is that
2329 the result is not actually wanted. We will generate it into
2330 a dummy pseudo-reg and discard it. They may not both be zero.
2332 Returns 1 if this operation can be performed; 0 if not. */
2335 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2338 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2339 enum mode_class mclass
;
2340 machine_mode wider_mode
;
2341 rtx_insn
*entry_last
= get_last_insn ();
2344 mclass
= GET_MODE_CLASS (mode
);
2347 targ0
= gen_reg_rtx (mode
);
2349 targ1
= gen_reg_rtx (mode
);
2351 /* Record where to go back to if we fail. */
2352 last
= get_last_insn ();
2354 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2356 class expand_operand ops
[3];
2357 enum insn_code icode
= optab_handler (unoptab
, mode
);
2359 create_fixed_operand (&ops
[0], targ0
);
2360 create_fixed_operand (&ops
[1], targ1
);
2361 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
2362 if (maybe_expand_insn (icode
, 3, ops
))
2366 /* It can't be done in this mode. Can we do it in a wider mode? */
2368 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2370 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2372 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2374 rtx t0
= gen_reg_rtx (wider_mode
);
2375 rtx t1
= gen_reg_rtx (wider_mode
);
2376 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2378 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2380 convert_move (targ0
, t0
, unsignedp
);
2381 convert_move (targ1
, t1
, unsignedp
);
2385 delete_insns_since (last
);
2390 delete_insns_since (entry_last
);
2394 /* Generate code to perform an operation specified by BINOPTAB
2395 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2396 We assume that the order of the operands for the instruction
2397 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2398 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2400 Either TARG0 or TARG1 may be zero, but what that means is that
2401 the result is not actually wanted. We will generate it into
2402 a dummy pseudo-reg and discard it. They may not both be zero.
2404 Returns 1 if this operation can be performed; 0 if not. */
2407 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2410 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2411 enum mode_class mclass
;
2412 machine_mode wider_mode
;
2413 rtx_insn
*entry_last
= get_last_insn ();
2416 mclass
= GET_MODE_CLASS (mode
);
2419 targ0
= gen_reg_rtx (mode
);
2421 targ1
= gen_reg_rtx (mode
);
2423 /* Record where to go back to if we fail. */
2424 last
= get_last_insn ();
2426 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2428 class expand_operand ops
[4];
2429 enum insn_code icode
= optab_handler (binoptab
, mode
);
2430 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2431 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2432 rtx xop0
= op0
, xop1
= op1
;
2434 /* If we are optimizing, force expensive constants into a register. */
2435 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2436 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2438 create_fixed_operand (&ops
[0], targ0
);
2439 create_convert_operand_from (&ops
[1], xop0
, mode
, unsignedp
);
2440 create_convert_operand_from (&ops
[2], xop1
, mode
, unsignedp
);
2441 create_fixed_operand (&ops
[3], targ1
);
2442 if (maybe_expand_insn (icode
, 4, ops
))
2444 delete_insns_since (last
);
2447 /* It can't be done in this mode. Can we do it in a wider mode? */
2449 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2451 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2453 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2455 rtx t0
= gen_reg_rtx (wider_mode
);
2456 rtx t1
= gen_reg_rtx (wider_mode
);
2457 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2458 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2460 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2463 convert_move (targ0
, t0
, unsignedp
);
2464 convert_move (targ1
, t1
, unsignedp
);
2468 delete_insns_since (last
);
2473 delete_insns_since (entry_last
);
2477 /* Expand the two-valued library call indicated by BINOPTAB, but
2478 preserve only one of the values. If TARG0 is non-NULL, the first
2479 value is placed into TARG0; otherwise the second value is placed
2480 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2481 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2482 This routine assumes that the value returned by the library call is
2483 as if the return value was of an integral mode twice as wide as the
2484 mode of OP0. Returns 1 if the call was successful. */
2487 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2488 rtx targ0
, rtx targ1
, enum rtx_code code
)
2491 machine_mode libval_mode
;
2496 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2497 gcc_assert (!targ0
!= !targ1
);
2499 mode
= GET_MODE (op0
);
2500 libfunc
= optab_libfunc (binoptab
, mode
);
2504 /* The value returned by the library function will have twice as
2505 many bits as the nominal MODE. */
2506 libval_mode
= smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode
));
2508 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2512 /* Get the part of VAL containing the value that we want. */
2513 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2514 targ0
? 0 : GET_MODE_SIZE (mode
));
2515 insns
= get_insns ();
2517 /* Move the into the desired location. */
2518 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2519 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2525 /* Wrapper around expand_unop which takes an rtx code to specify
2526 the operation to perform, not an optab pointer. All other
2527 arguments are the same. */
2529 expand_simple_unop (machine_mode mode
, enum rtx_code code
, rtx op0
,
2530 rtx target
, int unsignedp
)
2532 optab unop
= code_to_optab (code
);
2535 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2541 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2543 A similar operation can be used for clrsb. UNOPTAB says which operation
2544 we are trying to expand. */
2546 widen_leading (scalar_int_mode mode
, rtx op0
, rtx target
, optab unoptab
)
2548 opt_scalar_int_mode wider_mode_iter
;
2549 FOR_EACH_WIDER_MODE (wider_mode_iter
, mode
)
2551 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2552 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2557 last
= get_last_insn ();
2560 target
= gen_reg_rtx (mode
);
2561 xop0
= widen_operand (op0
, wider_mode
, mode
,
2562 unoptab
!= clrsb_optab
, false);
2563 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2564 unoptab
!= clrsb_optab
);
2567 (wider_mode
, sub_optab
, temp
,
2568 gen_int_mode (GET_MODE_PRECISION (wider_mode
)
2569 - GET_MODE_PRECISION (mode
),
2571 target
, true, OPTAB_DIRECT
);
2573 delete_insns_since (last
);
2581 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2582 quantities, choosing which based on whether the high word is nonzero. */
2584 expand_doubleword_clz (scalar_int_mode mode
, rtx op0
, rtx target
)
2586 rtx xop0
= force_reg (mode
, op0
);
2587 rtx subhi
= gen_highpart (word_mode
, xop0
);
2588 rtx sublo
= gen_lowpart (word_mode
, xop0
);
2589 rtx_code_label
*hi0_label
= gen_label_rtx ();
2590 rtx_code_label
*after_label
= gen_label_rtx ();
2594 /* If we were not given a target, use a word_mode register, not a
2595 'mode' register. The result will fit, and nobody is expecting
2596 anything bigger (the return type of __builtin_clz* is int). */
2598 target
= gen_reg_rtx (word_mode
);
2600 /* In any case, write to a word_mode scratch in both branches of the
2601 conditional, so we can ensure there is a single move insn setting
2602 'target' to tag a REG_EQUAL note on. */
2603 result
= gen_reg_rtx (word_mode
);
2607 /* If the high word is not equal to zero,
2608 then clz of the full value is clz of the high word. */
2609 emit_cmp_and_jump_insns (subhi
, CONST0_RTX (word_mode
), EQ
, 0,
2610 word_mode
, true, hi0_label
);
2612 temp
= expand_unop_direct (word_mode
, clz_optab
, subhi
, result
, true);
2617 convert_move (result
, temp
, true);
2619 emit_jump_insn (targetm
.gen_jump (after_label
));
2622 /* Else clz of the full value is clz of the low word plus the number
2623 of bits in the high word. */
2624 emit_label (hi0_label
);
2626 temp
= expand_unop_direct (word_mode
, clz_optab
, sublo
, 0, true);
2629 temp
= expand_binop (word_mode
, add_optab
, temp
,
2630 gen_int_mode (GET_MODE_BITSIZE (word_mode
), word_mode
),
2631 result
, true, OPTAB_DIRECT
);
2635 convert_move (result
, temp
, true);
2637 emit_label (after_label
);
2638 convert_move (target
, result
, true);
2643 add_equal_note (seq
, target
, CLZ
, xop0
, NULL_RTX
, mode
);
2652 /* Try calculating popcount of a double-word quantity as two popcount's of
2653 word-sized quantities and summing up the results. */
2655 expand_doubleword_popcount (scalar_int_mode mode
, rtx op0
, rtx target
)
2662 t0
= expand_unop_direct (word_mode
, popcount_optab
,
2663 operand_subword_force (op0
, 0, mode
), NULL_RTX
,
2665 t1
= expand_unop_direct (word_mode
, popcount_optab
,
2666 operand_subword_force (op0
, 1, mode
), NULL_RTX
,
2674 /* If we were not given a target, use a word_mode register, not a
2675 'mode' register. The result will fit, and nobody is expecting
2676 anything bigger (the return type of __builtin_popcount* is int). */
2678 target
= gen_reg_rtx (word_mode
);
2680 t
= expand_binop (word_mode
, add_optab
, t0
, t1
, target
, 0, OPTAB_DIRECT
);
2685 add_equal_note (seq
, t
, POPCOUNT
, op0
, NULL_RTX
, mode
);
2693 (parity:narrow (low (x) ^ high (x))) */
2695 expand_doubleword_parity (scalar_int_mode mode
, rtx op0
, rtx target
)
2697 rtx t
= expand_binop (word_mode
, xor_optab
,
2698 operand_subword_force (op0
, 0, mode
),
2699 operand_subword_force (op0
, 1, mode
),
2700 NULL_RTX
, 0, OPTAB_DIRECT
);
2701 return expand_unop (word_mode
, parity_optab
, t
, target
, true);
2707 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2709 widen_bswap (scalar_int_mode mode
, rtx op0
, rtx target
)
2713 opt_scalar_int_mode wider_mode_iter
;
2715 FOR_EACH_WIDER_MODE (wider_mode_iter
, mode
)
2716 if (optab_handler (bswap_optab
, wider_mode_iter
.require ())
2717 != CODE_FOR_nothing
)
2720 if (!wider_mode_iter
.exists ())
2723 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2724 last
= get_last_insn ();
2726 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2727 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2729 gcc_assert (GET_MODE_PRECISION (wider_mode
) == GET_MODE_BITSIZE (wider_mode
)
2730 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
));
2732 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2733 GET_MODE_BITSIZE (wider_mode
)
2734 - GET_MODE_BITSIZE (mode
),
2740 target
= gen_reg_rtx (mode
);
2741 emit_move_insn (target
, gen_lowpart (mode
, x
));
2744 delete_insns_since (last
);
2749 /* Try calculating bswap as two bswaps of two word-sized operands. */
2752 expand_doubleword_bswap (machine_mode mode
, rtx op
, rtx target
)
2756 t1
= expand_unop (word_mode
, bswap_optab
,
2757 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2758 t0
= expand_unop (word_mode
, bswap_optab
,
2759 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2761 if (target
== 0 || !valid_multiword_target_p (target
))
2762 target
= gen_reg_rtx (mode
);
2764 emit_clobber (target
);
2765 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2766 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
2771 /* Try calculating (parity x) as (and (popcount x) 1), where
2772 popcount can also be done in a wider mode. */
2774 expand_parity (scalar_int_mode mode
, rtx op0
, rtx target
)
2776 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2777 opt_scalar_int_mode wider_mode_iter
;
2778 FOR_EACH_MODE_FROM (wider_mode_iter
, mode
)
2780 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2781 if (optab_handler (popcount_optab
, wider_mode
) != CODE_FOR_nothing
)
2786 last
= get_last_insn ();
2788 if (target
== 0 || GET_MODE (target
) != wider_mode
)
2789 target
= gen_reg_rtx (wider_mode
);
2791 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2792 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2795 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2796 target
, true, OPTAB_DIRECT
);
2800 if (mclass
!= MODE_INT
2801 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2802 return convert_to_mode (mode
, temp
, 0);
2804 return gen_lowpart (mode
, temp
);
2807 delete_insns_since (last
);
2813 /* Try calculating ctz(x) as K - clz(x & -x) ,
2814 where K is GET_MODE_PRECISION(mode) - 1.
2816 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2817 don't have to worry about what the hardware does in that case. (If
2818 the clz instruction produces the usual value at 0, which is K, the
2819 result of this code sequence will be -1; expand_ffs, below, relies
2820 on this. It might be nice to have it be K instead, for consistency
2821 with the (very few) processors that provide a ctz with a defined
2822 value, but that would take one more instruction, and it would be
2823 less convenient for expand_ffs anyway. */
2826 expand_ctz (scalar_int_mode mode
, rtx op0
, rtx target
)
2831 if (optab_handler (clz_optab
, mode
) == CODE_FOR_nothing
)
2836 temp
= expand_unop_direct (mode
, neg_optab
, op0
, NULL_RTX
, true);
2838 temp
= expand_binop (mode
, and_optab
, op0
, temp
, NULL_RTX
,
2839 true, OPTAB_DIRECT
);
2841 temp
= expand_unop_direct (mode
, clz_optab
, temp
, NULL_RTX
, true);
2843 temp
= expand_binop (mode
, sub_optab
,
2844 gen_int_mode (GET_MODE_PRECISION (mode
) - 1, mode
),
2846 true, OPTAB_DIRECT
);
2856 add_equal_note (seq
, temp
, CTZ
, op0
, NULL_RTX
, mode
);
2862 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2863 else with the sequence used by expand_clz.
2865 The ffs builtin promises to return zero for a zero value and ctz/clz
2866 may have an undefined value in that case. If they do not give us a
2867 convenient value, we have to generate a test and branch. */
2869 expand_ffs (scalar_int_mode mode
, rtx op0
, rtx target
)
2871 HOST_WIDE_INT val
= 0;
2872 bool defined_at_zero
= false;
2876 if (optab_handler (ctz_optab
, mode
) != CODE_FOR_nothing
)
2880 temp
= expand_unop_direct (mode
, ctz_optab
, op0
, 0, true);
2884 defined_at_zero
= (CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2);
2886 else if (optab_handler (clz_optab
, mode
) != CODE_FOR_nothing
)
2889 temp
= expand_ctz (mode
, op0
, 0);
2893 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2)
2895 defined_at_zero
= true;
2896 val
= (GET_MODE_PRECISION (mode
) - 1) - val
;
2902 if (defined_at_zero
&& val
== -1)
2903 /* No correction needed at zero. */;
2906 /* We don't try to do anything clever with the situation found
2907 on some processors (eg Alpha) where ctz(0:mode) ==
2908 bitsize(mode). If someone can think of a way to send N to -1
2909 and leave alone all values in the range 0..N-1 (where N is a
2910 power of two), cheaper than this test-and-branch, please add it.
2912 The test-and-branch is done after the operation itself, in case
2913 the operation sets condition codes that can be recycled for this.
2914 (This is true on i386, for instance.) */
2916 rtx_code_label
*nonzero_label
= gen_label_rtx ();
2917 emit_cmp_and_jump_insns (op0
, CONST0_RTX (mode
), NE
, 0,
2918 mode
, true, nonzero_label
);
2920 convert_move (temp
, GEN_INT (-1), false);
2921 emit_label (nonzero_label
);
2924 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2925 to produce a value in the range 0..bitsize. */
2926 temp
= expand_binop (mode
, add_optab
, temp
, gen_int_mode (1, mode
),
2927 target
, false, OPTAB_DIRECT
);
2934 add_equal_note (seq
, temp
, FFS
, op0
, NULL_RTX
, mode
);
2943 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2944 conditions, VAL may already be a SUBREG against which we cannot generate
2945 a further SUBREG. In this case, we expect forcing the value into a
2946 register will work around the situation. */
2949 lowpart_subreg_maybe_copy (machine_mode omode
, rtx val
,
2953 ret
= lowpart_subreg (omode
, val
, imode
);
2956 val
= force_reg (imode
, val
);
2957 ret
= lowpart_subreg (omode
, val
, imode
);
2958 gcc_assert (ret
!= NULL
);
2963 /* Expand a floating point absolute value or negation operation via a
2964 logical operation on the sign bit. */
2967 expand_absneg_bit (enum rtx_code code
, scalar_float_mode mode
,
2968 rtx op0
, rtx target
)
2970 const struct real_format
*fmt
;
2971 int bitpos
, word
, nwords
, i
;
2972 scalar_int_mode imode
;
2976 /* The format has to have a simple sign bit. */
2977 fmt
= REAL_MODE_FORMAT (mode
);
2981 bitpos
= fmt
->signbit_rw
;
2985 /* Don't create negative zeros if the format doesn't support them. */
2986 if (code
== NEG
&& !fmt
->has_signed_zero
)
2989 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2991 if (!int_mode_for_mode (mode
).exists (&imode
))
3000 if (FLOAT_WORDS_BIG_ENDIAN
)
3001 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3003 word
= bitpos
/ BITS_PER_WORD
;
3004 bitpos
= bitpos
% BITS_PER_WORD
;
3005 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3008 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3014 || reg_overlap_mentioned_p (target
, op0
)
3015 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3016 target
= gen_reg_rtx (mode
);
3022 for (i
= 0; i
< nwords
; ++i
)
3024 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3025 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3029 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3031 immed_wide_int_const (mask
, imode
),
3032 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3033 if (temp
!= targ_piece
)
3034 emit_move_insn (targ_piece
, temp
);
3037 emit_move_insn (targ_piece
, op0_piece
);
3040 insns
= get_insns ();
3047 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3048 gen_lowpart (imode
, op0
),
3049 immed_wide_int_const (mask
, imode
),
3050 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3051 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3053 set_dst_reg_note (get_last_insn (), REG_EQUAL
,
3054 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)),
3061 /* As expand_unop, but will fail rather than attempt the operation in a
3062 different mode or with a libcall. */
3064 expand_unop_direct (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3067 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
3069 class expand_operand ops
[2];
3070 enum insn_code icode
= optab_handler (unoptab
, mode
);
3071 rtx_insn
*last
= get_last_insn ();
3074 create_output_operand (&ops
[0], target
, mode
);
3075 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
3076 pat
= maybe_gen_insn (icode
, 2, ops
);
3079 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3080 && ! add_equal_note (pat
, ops
[0].value
,
3081 optab_to_code (unoptab
),
3082 ops
[1].value
, NULL_RTX
, mode
))
3084 delete_insns_since (last
);
3085 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
3090 return ops
[0].value
;
3096 /* Generate code to perform an operation specified by UNOPTAB
3097 on operand OP0, with result having machine-mode MODE.
3099 UNSIGNEDP is for the case where we have to widen the operands
3100 to perform the operation. It says to use zero-extension.
3102 If TARGET is nonzero, the value
3103 is generated there, if it is convenient to do so.
3104 In all cases an rtx is returned for the locus of the value;
3105 this may or may not be TARGET. */
3108 expand_unop (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3111 enum mode_class mclass
= GET_MODE_CLASS (mode
);
3112 machine_mode wider_mode
;
3113 scalar_int_mode int_mode
;
3114 scalar_float_mode float_mode
;
3118 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3122 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3124 /* Widening (or narrowing) clz needs special treatment. */
3125 if (unoptab
== clz_optab
)
3127 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3129 temp
= widen_leading (int_mode
, op0
, target
, unoptab
);
3133 if (GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3134 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3136 temp
= expand_doubleword_clz (int_mode
, op0
, target
);
3145 if (unoptab
== clrsb_optab
)
3147 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3149 temp
= widen_leading (int_mode
, op0
, target
, unoptab
);
3156 if (unoptab
== popcount_optab
3157 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3158 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3159 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
3160 && optimize_insn_for_speed_p ())
3162 temp
= expand_doubleword_popcount (int_mode
, op0
, target
);
3167 if (unoptab
== parity_optab
3168 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3169 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3170 && (optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
3171 || optab_handler (popcount_optab
, word_mode
) != CODE_FOR_nothing
)
3172 && optimize_insn_for_speed_p ())
3174 temp
= expand_doubleword_parity (int_mode
, op0
, target
);
3179 /* Widening (or narrowing) bswap needs special treatment. */
3180 if (unoptab
== bswap_optab
)
3182 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3183 or ROTATERT. First try these directly; if this fails, then try the
3184 obvious pair of shifts with allowed widening, as this will probably
3185 be always more efficient than the other fallback methods. */
3191 if (optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
)
3193 temp
= expand_binop (mode
, rotl_optab
, op0
,
3194 gen_int_shift_amount (mode
, 8),
3195 target
, unsignedp
, OPTAB_DIRECT
);
3200 if (optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
3202 temp
= expand_binop (mode
, rotr_optab
, op0
,
3203 gen_int_shift_amount (mode
, 8),
3204 target
, unsignedp
, OPTAB_DIRECT
);
3209 last
= get_last_insn ();
3211 temp1
= expand_binop (mode
, ashl_optab
, op0
,
3212 gen_int_shift_amount (mode
, 8), NULL_RTX
,
3213 unsignedp
, OPTAB_WIDEN
);
3214 temp2
= expand_binop (mode
, lshr_optab
, op0
,
3215 gen_int_shift_amount (mode
, 8), NULL_RTX
,
3216 unsignedp
, OPTAB_WIDEN
);
3219 temp
= expand_binop (mode
, ior_optab
, temp1
, temp2
, target
,
3220 unsignedp
, OPTAB_WIDEN
);
3225 delete_insns_since (last
);
3228 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3230 temp
= widen_bswap (int_mode
, op0
, target
);
3234 /* We do not provide a 128-bit bswap in libgcc so force the use of
3235 a double bswap for 64-bit targets. */
3236 if (GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3237 && (UNITS_PER_WORD
== 8
3238 || optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
))
3240 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3249 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3250 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
3252 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
3255 rtx_insn
*last
= get_last_insn ();
3257 /* For certain operations, we need not actually extend
3258 the narrow operand, as long as we will truncate the
3259 results to the same narrowness. */
3261 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3262 (unoptab
== neg_optab
3263 || unoptab
== one_cmpl_optab
)
3264 && mclass
== MODE_INT
);
3266 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3271 if (mclass
!= MODE_INT
3272 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
3275 target
= gen_reg_rtx (mode
);
3276 convert_move (target
, temp
, 0);
3280 return gen_lowpart (mode
, temp
);
3283 delete_insns_since (last
);
3287 /* These can be done a word at a time. */
3288 if (unoptab
== one_cmpl_optab
3289 && is_int_mode (mode
, &int_mode
)
3290 && GET_MODE_SIZE (int_mode
) > UNITS_PER_WORD
3291 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3298 || reg_overlap_mentioned_p (target
, op0
)
3299 || !valid_multiword_target_p (target
))
3300 target
= gen_reg_rtx (int_mode
);
3304 /* Do the actual arithmetic. */
3305 for (i
= 0; i
< GET_MODE_BITSIZE (int_mode
) / BITS_PER_WORD
; i
++)
3307 rtx target_piece
= operand_subword (target
, i
, 1, int_mode
);
3308 rtx x
= expand_unop (word_mode
, unoptab
,
3309 operand_subword_force (op0
, i
, int_mode
),
3310 target_piece
, unsignedp
);
3312 if (target_piece
!= x
)
3313 emit_move_insn (target_piece
, x
);
3316 insns
= get_insns ();
3323 /* Emit ~op0 as op0 ^ -1. */
3324 if (unoptab
== one_cmpl_optab
3325 && (SCALAR_INT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
3326 && optab_handler (xor_optab
, mode
) != CODE_FOR_nothing
)
3328 temp
= expand_binop (mode
, xor_optab
, op0
, CONSTM1_RTX (mode
),
3329 target
, unsignedp
, OPTAB_DIRECT
);
3334 if (optab_to_code (unoptab
) == NEG
)
3336 /* Try negating floating point values by flipping the sign bit. */
3337 if (is_a
<scalar_float_mode
> (mode
, &float_mode
))
3339 temp
= expand_absneg_bit (NEG
, float_mode
, op0
, target
);
3344 /* If there is no negation pattern, and we have no negative zero,
3345 try subtracting from zero. */
3346 if (!HONOR_SIGNED_ZEROS (mode
))
3348 temp
= expand_binop (mode
, (unoptab
== negv_optab
3349 ? subv_optab
: sub_optab
),
3350 CONST0_RTX (mode
), op0
, target
,
3351 unsignedp
, OPTAB_DIRECT
);
3357 /* Try calculating parity (x) as popcount (x) % 2. */
3358 if (unoptab
== parity_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3360 temp
= expand_parity (int_mode
, op0
, target
);
3365 /* Try implementing ffs (x) in terms of clz (x). */
3366 if (unoptab
== ffs_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3368 temp
= expand_ffs (int_mode
, op0
, target
);
3373 /* Try implementing ctz (x) in terms of clz (x). */
3374 if (unoptab
== ctz_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3376 temp
= expand_ctz (int_mode
, op0
, target
);
3382 /* Now try a library call in this mode. */
3383 libfunc
= optab_libfunc (unoptab
, mode
);
3389 machine_mode outmode
= mode
;
3391 /* All of these functions return small values. Thus we choose to
3392 have them return something that isn't a double-word. */
3393 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3394 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
3395 || unoptab
== parity_optab
)
3397 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
3398 optab_libfunc (unoptab
, mode
)));
3402 /* Pass 1 for NO_QUEUE so we don't lose any increments
3403 if the libcall is cse'd or moved. */
3404 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3406 insns
= get_insns ();
3409 target
= gen_reg_rtx (outmode
);
3410 bool trapv
= trapv_unoptab_p (unoptab
);
3412 eq_value
= NULL_RTX
;
3415 eq_value
= gen_rtx_fmt_e (optab_to_code (unoptab
), mode
, op0
);
3416 if (GET_MODE_UNIT_SIZE (outmode
) < GET_MODE_UNIT_SIZE (mode
))
3417 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
3418 else if (GET_MODE_UNIT_SIZE (outmode
) > GET_MODE_UNIT_SIZE (mode
))
3419 eq_value
= simplify_gen_unary (ZERO_EXTEND
,
3420 outmode
, eq_value
, mode
);
3422 emit_libcall_block_1 (insns
, target
, value
, eq_value
, trapv
);
3427 /* It can't be done in this mode. Can we do it in a wider mode? */
3429 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3431 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
3433 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
3434 || optab_libfunc (unoptab
, wider_mode
))
3437 rtx_insn
*last
= get_last_insn ();
3439 /* For certain operations, we need not actually extend
3440 the narrow operand, as long as we will truncate the
3441 results to the same narrowness. */
3442 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3443 (unoptab
== neg_optab
3444 || unoptab
== one_cmpl_optab
3445 || unoptab
== bswap_optab
)
3446 && mclass
== MODE_INT
);
3448 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3451 /* If we are generating clz using wider mode, adjust the
3452 result. Similarly for clrsb. */
3453 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3456 scalar_int_mode wider_int_mode
3457 = as_a
<scalar_int_mode
> (wider_mode
);
3458 int_mode
= as_a
<scalar_int_mode
> (mode
);
3460 (wider_mode
, sub_optab
, temp
,
3461 gen_int_mode (GET_MODE_PRECISION (wider_int_mode
)
3462 - GET_MODE_PRECISION (int_mode
),
3464 target
, true, OPTAB_DIRECT
);
3467 /* Likewise for bswap. */
3468 if (unoptab
== bswap_optab
&& temp
!= 0)
3470 scalar_int_mode wider_int_mode
3471 = as_a
<scalar_int_mode
> (wider_mode
);
3472 int_mode
= as_a
<scalar_int_mode
> (mode
);
3473 gcc_assert (GET_MODE_PRECISION (wider_int_mode
)
3474 == GET_MODE_BITSIZE (wider_int_mode
)
3475 && GET_MODE_PRECISION (int_mode
)
3476 == GET_MODE_BITSIZE (int_mode
));
3478 temp
= expand_shift (RSHIFT_EXPR
, wider_int_mode
, temp
,
3479 GET_MODE_BITSIZE (wider_int_mode
)
3480 - GET_MODE_BITSIZE (int_mode
),
3486 if (mclass
!= MODE_INT
)
3489 target
= gen_reg_rtx (mode
);
3490 convert_move (target
, temp
, 0);
3494 return gen_lowpart (mode
, temp
);
3497 delete_insns_since (last
);
3502 /* One final attempt at implementing negation via subtraction,
3503 this time allowing widening of the operand. */
3504 if (optab_to_code (unoptab
) == NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3507 temp
= expand_binop (mode
,
3508 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3509 CONST0_RTX (mode
), op0
,
3510 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3518 /* Emit code to compute the absolute value of OP0, with result to
3519 TARGET if convenient. (TARGET may be 0.) The return value says
3520 where the result actually is to be found.
3522 MODE is the mode of the operand; the mode of the result is
3523 different but can be deduced from MODE.
3528 expand_abs_nojump (machine_mode mode
, rtx op0
, rtx target
,
3529 int result_unsignedp
)
3533 if (GET_MODE_CLASS (mode
) != MODE_INT
3535 result_unsignedp
= 1;
3537 /* First try to do it with a special abs instruction. */
3538 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3543 /* For floating point modes, try clearing the sign bit. */
3544 scalar_float_mode float_mode
;
3545 if (is_a
<scalar_float_mode
> (mode
, &float_mode
))
3547 temp
= expand_absneg_bit (ABS
, float_mode
, op0
, target
);
3552 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3553 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3554 && !HONOR_SIGNED_ZEROS (mode
))
3556 rtx_insn
*last
= get_last_insn ();
3558 temp
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3561 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3567 delete_insns_since (last
);
3570 /* If this machine has expensive jumps, we can do integer absolute
3571 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3572 where W is the width of MODE. */
3574 scalar_int_mode int_mode
;
3575 if (is_int_mode (mode
, &int_mode
)
3576 && BRANCH_COST (optimize_insn_for_speed_p (),
3579 rtx extended
= expand_shift (RSHIFT_EXPR
, int_mode
, op0
,
3580 GET_MODE_PRECISION (int_mode
) - 1,
3583 temp
= expand_binop (int_mode
, xor_optab
, extended
, op0
, target
, 0,
3586 temp
= expand_binop (int_mode
,
3587 result_unsignedp
? sub_optab
: subv_optab
,
3588 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3598 expand_abs (machine_mode mode
, rtx op0
, rtx target
,
3599 int result_unsignedp
, int safe
)
3602 rtx_code_label
*op1
;
3604 if (GET_MODE_CLASS (mode
) != MODE_INT
3606 result_unsignedp
= 1;
3608 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3612 /* If that does not win, use conditional jump and negate. */
3614 /* It is safe to use the target if it is the same
3615 as the source if this is also a pseudo register */
3616 if (op0
== target
&& REG_P (op0
)
3617 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3620 op1
= gen_label_rtx ();
3621 if (target
== 0 || ! safe
3622 || GET_MODE (target
) != mode
3623 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3625 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3626 target
= gen_reg_rtx (mode
);
3628 emit_move_insn (target
, op0
);
3631 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3632 NULL_RTX
, NULL
, op1
,
3633 profile_probability::uninitialized ());
3635 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3638 emit_move_insn (target
, op0
);
3644 /* Emit code to compute the one's complement absolute value of OP0
3645 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3646 (TARGET may be NULL_RTX.) The return value says where the result
3647 actually is to be found.
3649 MODE is the mode of the operand; the mode of the result is
3650 different but can be deduced from MODE. */
3653 expand_one_cmpl_abs_nojump (machine_mode mode
, rtx op0
, rtx target
)
3657 /* Not applicable for floating point modes. */
3658 if (FLOAT_MODE_P (mode
))
3661 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3662 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3664 rtx_insn
*last
= get_last_insn ();
3666 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3668 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3674 delete_insns_since (last
);
3677 /* If this machine has expensive jumps, we can do one's complement
3678 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3680 scalar_int_mode int_mode
;
3681 if (is_int_mode (mode
, &int_mode
)
3682 && BRANCH_COST (optimize_insn_for_speed_p (),
3685 rtx extended
= expand_shift (RSHIFT_EXPR
, int_mode
, op0
,
3686 GET_MODE_PRECISION (int_mode
) - 1,
3689 temp
= expand_binop (int_mode
, xor_optab
, extended
, op0
, target
, 0,
3699 /* A subroutine of expand_copysign, perform the copysign operation using the
3700 abs and neg primitives advertised to exist on the target. The assumption
3701 is that we have a split register file, and leaving op0 in fp registers,
3702 and not playing with subregs so much, will help the register allocator. */
3705 expand_copysign_absneg (scalar_float_mode mode
, rtx op0
, rtx op1
, rtx target
,
3706 int bitpos
, bool op0_is_abs
)
3708 scalar_int_mode imode
;
3709 enum insn_code icode
;
3711 rtx_code_label
*label
;
3716 /* Check if the back end provides an insn that handles signbit for the
3718 icode
= optab_handler (signbit_optab
, mode
);
3719 if (icode
!= CODE_FOR_nothing
)
3721 imode
= as_a
<scalar_int_mode
> (insn_data
[(int) icode
].operand
[0].mode
);
3722 sign
= gen_reg_rtx (imode
);
3723 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3727 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3729 if (!int_mode_for_mode (mode
).exists (&imode
))
3731 op1
= gen_lowpart (imode
, op1
);
3738 if (FLOAT_WORDS_BIG_ENDIAN
)
3739 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3741 word
= bitpos
/ BITS_PER_WORD
;
3742 bitpos
= bitpos
% BITS_PER_WORD
;
3743 op1
= operand_subword_force (op1
, word
, mode
);
3746 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3747 sign
= expand_binop (imode
, and_optab
, op1
,
3748 immed_wide_int_const (mask
, imode
),
3749 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3754 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3761 if (target
== NULL_RTX
)
3762 target
= copy_to_reg (op0
);
3764 emit_move_insn (target
, op0
);
3767 label
= gen_label_rtx ();
3768 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3770 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3771 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3773 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3775 emit_move_insn (target
, op0
);
3783 /* A subroutine of expand_copysign, perform the entire copysign operation
3784 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3785 is true if op0 is known to have its sign bit clear. */
3788 expand_copysign_bit (scalar_float_mode mode
, rtx op0
, rtx op1
, rtx target
,
3789 int bitpos
, bool op0_is_abs
)
3791 scalar_int_mode imode
;
3792 int word
, nwords
, i
;
3796 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3798 if (!int_mode_for_mode (mode
).exists (&imode
))
3807 if (FLOAT_WORDS_BIG_ENDIAN
)
3808 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3810 word
= bitpos
/ BITS_PER_WORD
;
3811 bitpos
= bitpos
% BITS_PER_WORD
;
3812 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3815 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3820 || reg_overlap_mentioned_p (target
, op0
)
3821 || reg_overlap_mentioned_p (target
, op1
)
3822 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3823 target
= gen_reg_rtx (mode
);
3829 for (i
= 0; i
< nwords
; ++i
)
3831 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3832 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3838 = expand_binop (imode
, and_optab
, op0_piece
,
3839 immed_wide_int_const (~mask
, imode
),
3840 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3841 op1
= expand_binop (imode
, and_optab
,
3842 operand_subword_force (op1
, i
, mode
),
3843 immed_wide_int_const (mask
, imode
),
3844 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3846 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3847 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3848 if (temp
!= targ_piece
)
3849 emit_move_insn (targ_piece
, temp
);
3852 emit_move_insn (targ_piece
, op0_piece
);
3855 insns
= get_insns ();
3862 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3863 immed_wide_int_const (mask
, imode
),
3864 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3866 op0
= gen_lowpart (imode
, op0
);
3868 op0
= expand_binop (imode
, and_optab
, op0
,
3869 immed_wide_int_const (~mask
, imode
),
3870 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3872 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3873 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3874 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3880 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3881 scalar floating point mode. Return NULL if we do not know how to
3882 expand the operation inline. */
3885 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3887 scalar_float_mode mode
;
3888 const struct real_format
*fmt
;
3892 mode
= as_a
<scalar_float_mode
> (GET_MODE (op0
));
3893 gcc_assert (GET_MODE (op1
) == mode
);
3895 /* First try to do it with a special instruction. */
3896 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3897 target
, 0, OPTAB_DIRECT
);
3901 fmt
= REAL_MODE_FORMAT (mode
);
3902 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3906 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3908 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3909 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3913 if (fmt
->signbit_ro
>= 0
3914 && (CONST_DOUBLE_AS_FLOAT_P (op0
)
3915 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3916 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3918 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3919 fmt
->signbit_ro
, op0_is_abs
);
3924 if (fmt
->signbit_rw
< 0)
3926 return expand_copysign_bit (mode
, op0
, op1
, target
,
3927 fmt
->signbit_rw
, op0_is_abs
);
3930 /* Generate an instruction whose insn-code is INSN_CODE,
3931 with two operands: an output TARGET and an input OP0.
3932 TARGET *must* be nonzero, and the output is always stored there.
3933 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3934 the value that is stored into TARGET.
3936 Return false if expansion failed. */
3939 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3942 class expand_operand ops
[2];
3945 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3946 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3947 pat
= maybe_gen_insn (icode
, 2, ops
);
3951 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3953 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
,
3958 if (ops
[0].value
!= target
)
3959 emit_move_insn (target
, ops
[0].value
);
3962 /* Generate an instruction whose insn-code is INSN_CODE,
3963 with two operands: an output TARGET and an input OP0.
3964 TARGET *must* be nonzero, and the output is always stored there.
3965 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3966 the value that is stored into TARGET. */
3969 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3971 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3975 struct no_conflict_data
3978 rtx_insn
*first
, *insn
;
3982 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3983 the currently examined clobber / store has to stay in the list of
3984 insns that constitute the actual libcall block. */
3986 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3988 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3990 /* If this inns directly contributes to setting the target, it must stay. */
3991 if (reg_overlap_mentioned_p (p
->target
, dest
))
3992 p
->must_stay
= true;
3993 /* If we haven't committed to keeping any other insns in the list yet,
3994 there is nothing more to check. */
3995 else if (p
->insn
== p
->first
)
3997 /* If this insn sets / clobbers a register that feeds one of the insns
3998 already in the list, this insn has to stay too. */
3999 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
4000 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
4001 || reg_used_between_p (dest
, p
->first
, p
->insn
)
4002 /* Likewise if this insn depends on a register set by a previous
4003 insn in the list, or if it sets a result (presumably a hard
4004 register) that is set or clobbered by a previous insn.
4005 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4006 SET_DEST perform the former check on the address, and the latter
4007 check on the MEM. */
4008 || (GET_CODE (set
) == SET
4009 && (modified_in_p (SET_SRC (set
), p
->first
)
4010 || modified_in_p (SET_DEST (set
), p
->first
)
4011 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
4012 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
4013 p
->must_stay
= true;
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (insn, no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}
void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
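
/* A typical caller builds the libcall insns inside a sequence and then
   hands them to emit_libcall_block, e.g. (a sketch; see
   prepare_float_lib_cmp below for a real instance):

     start_sequence ();
     value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				      mode, x, mode, y, mode);
     insns = get_insns ();
     end_sequence ();
     target = gen_reg_rtx (mode);
     emit_libcall_block (insns, target, value, equiv);  */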
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;

  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* Return whether RTL code CODE corresponds to an unsigned optab.  */

static bool
unsigned_optab_p (enum rtx_code code)
{
  return code == LTU || code == LEU || code == GTU || code == GEU;
}
/* Return whether the backend-emitted comparison for code CODE, comparing
   operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
   operand OPNO of pattern ICODE.  */

static bool
insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
			  enum rtx_code code, machine_mode mask_mode,
			  machine_mode value_mode)
{
  rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
  return insn_operand_matches (icode, opno, test);
}
/* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
   for code CODE, comparing operands of mode VALUE_MODE and producing a result
   of mode MASK_MODE.  */

bool
can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
		       machine_mode mask_mode)
{
  enum insn_code icode
    = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
}
/* Return whether the backend can emit a vector comparison (vcond/vcondu) for
   code CODE, comparing operands of mode CMP_OP_MODE and producing a result
   of mode VALUE_MODE.  */

bool
can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
		     machine_mode cmp_op_mode)
{
  enum insn_code icode
    = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
  if (icode == CODE_FOR_nothing)
    return false;

  return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
}
/* Return whether the backend can emit vector set instructions for inserting
   element into vector at variable index position.  */

bool
can_vec_set_var_idx_p (machine_mode vec_mode)
{
  if (!VECTOR_MODE_P (vec_mode))
    return false;

  machine_mode inner_mode = GET_MODE_INNER (vec_mode);
  rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);
  rtx reg3 = alloca_raw_REG (VOIDmode, LAST_VIRTUAL_REGISTER + 3);

  enum insn_code icode = optab_handler (vec_set_optab, vec_mode);

  return icode != CODE_FOR_nothing
	 && insn_operand_matches (icode, 0, reg1)
	 && insn_operand_matches (icode, 1, reg2)
	 && insn_operand_matches (icode, 2, reg3);
}
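
/* Here REG1, REG2 and REG3 stand in for the vector, the inserted element
   and the variable index respectively; REG3 is given VOIDmode so that
   only the index operand's predicate, not a particular index mode, is
   checked (an inference from the code above, not a documented
   contract).  */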
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      opt_scalar_int_mode cmp_mode_iter;
      FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
	{
	  scalar_int_mode cmp_mode = cmp_mode_iter.require ();
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if (CONST_INT_P (size)
	      ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
		 > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (x, y, size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
		  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
	{
	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
			    ptest, pmode);
	  if (*ptest)
	    return;
	}

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.

	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
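
/* Illustration of the two libcall return conventions described in
   prepare_cmp_insn above (the values are those stated in its comment):

       relation   biased routine   unbiased routine
       x <  y           0                 -1
       x == y           1                  0
       x >  y           2                  1

   so a signed "x < y" becomes "result < 1" (biased) or "result < 0"
   (unbiased), and for unsigned comparisons the unbiased result is first
   biased by adding 1 so that the comparison against 1 works in both
   conventions.  */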
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
		 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
	return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
	return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
			  profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					  XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, rtx label,
			 profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
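
/* A minimal usage sketch with hypothetical operands: jump to LABEL when
   the SImode value X is less than Y, both signed:

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 0, label);

   PROB can be left to its default, as the calls in expand_float and
   expand_fix below do.  */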
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  std::swap (x, y);
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      class expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
	 punt and let the caller figure out how best to deal with this
	 situation.  */
      if (COMPARISON_P (comparison))
	{
	  saved_pending_stack_adjust save;
	  save_pending_stack_adjust (&save);
	  last = get_last_insn ();
	  do_pending_stack_adjust ();
	  machine_mode cmpmode = cmode;
	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
			    GET_CODE (comparison), NULL_RTX, unsignedp,
			    OPTAB_WIDEN, &comparison, &cmpmode);
	  if (comparison)
	    {
	      class expand_operand ops[4];

	      create_output_operand (&ops[0], target, mode);
	      create_fixed_operand (&ops[1], comparison);
	      create_input_operand (&ops[2], op2, mode);
	      create_input_operand (&ops[3], op3, mode);
	      if (maybe_expand_insn (icode, 4, ops))
		{
		  if (ops[0].value != target)
		    convert_move (target, ops[0].value, false);
		  return target;
		}
	      delete_insns_since (last);
	    }
	  restore_pending_stack_adjust (&save);
	}

      if (pass == 1)
	return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
	 operand order, perhaps it will expand successfully.  */
      if (swapped)
	code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
							   NULL))
	       != UNKNOWN)
	code = reversed;
      else
	return NULL_RTX;
      std::swap (op2, op3);
    }
}
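
/* A minimal usage sketch with hypothetical operands: compute
   TARGET = (A < B) ? C : D for signed SImode values:

     rtx res = emit_conditional_move (target, LT, a, b, SImode,
				      c, d, SImode, 0);

   callers must check for a NULL_RTX result and fall back to a
   compare-and-branch expansion when the target has no movcc pattern.  */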
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
				    machine_mode mode, rtx cond, rtx op1,
				    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  class expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}

/* Generate and return an insn body to add Y and Z, storing the
   result in X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && (significand_size (fmode)
		< GET_MODE_UNIT_PRECISION (GET_MODE (from))))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
	{
	  scalar_mode fmode = fmode_iter.require ();
	  if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
	      && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
	    break;
	}

      scalar_mode fmode;
      if (!fmode_iter.exists (&fmode))
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = to_mode;

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (from_mode))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = from_mode;
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
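
/* Note on the shift trick above (illustrative): halving the operand
   while OR-ing the shifted-out bit back in keeps that bit as a "sticky"
   rounding bit, so converting the halved value and then doubling the
   result rounds the same way a direct conversion of the full value
   would; simply discarding the low bit could double-round.  */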
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
	scalar_mode fmode = fmode_iter.require ();
	if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
					   0, &must_trunc)
	    && (!DECIMAL_FLOAT_MODE_P (fmode)
		|| (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
	  {
	    int bitsize;
	    REAL_VALUE_TYPE offset;
	    rtx limit;
	    rtx_code_label *lab1, *lab2;
	    rtx_insn *insn;

	    bitsize = GET_MODE_PRECISION (to_mode);
	    real_2expN (&offset, bitsize - 1, fmode);
	    limit = const_double_from_real_value (offset, fmode);
	    lab1 = gen_label_rtx ();
	    lab2 = gen_label_rtx ();

	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    /* See if we need to do the subtraction.  */
	    do_pending_stack_adjust ();
	    emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
				     GET_MODE (from), 0, lab1);

	    /* If not, do the signed "fix" and branch around fixup code.  */
	    expand_fix (to, from, 0);
	    emit_jump_insn (targetm.gen_jump (lab2));
	    emit_barrier ();

	    /* Otherwise, subtract 2**(N-1), convert to signed number,
	       then add 2**(N-1).  Do the addition using XOR since this
	       will often generate better code.  */
	    emit_label (lab1);
	    target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				   NULL_RTX, 0, OPTAB_LIB_WIDEN);
	    expand_fix (to, target, 0);
	    target = expand_binop (to_mode, xor_optab, to,
				   gen_int_mode
				   (HOST_WIDE_INT_1 << (bitsize - 1),
				    to_mode),
				   to, 1, OPTAB_LIB_WIDEN);

	    if (target != to)
	      emit_move_insn (to, target);

	    emit_label (lab2);

	    if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
	      {
		/* Make a place for a REG_NOTE and add it.  */
		insn = emit_move_insn (to, to);
		set_dst_reg_note (insn, REG_EQUAL,
				  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
						 copy_rtx (from)),
				  to);
	      }

	    return;
	  }
      }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
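
/* Worked example of the unsigned path above (illustrative, DImode TO,
   bitsize 64): for an input of 2^63 + 1024.0 the GE test against
   LIMIT = 2^63 succeeds, so we compute 2^63 + 1024.0 - 2^63 = 1024.0
   exactly (only the top bit is cleared), do the signed fix to get 1024,
   and XOR with 1 << 63 to produce the bit pattern of 2^63 + 1024.  */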
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
	 it here instead of inside emit_library_call_value because in
	 emit_library_call_value we don't know if we should do a signed or
	 unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
					&unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
	return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
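
/* The dump format is one line per libfunc; illustrative lines might
   look like

     plus	SI:	__addsi3
     fix	DF	SI:	__fixdfsi

   with the optab's rtx code first and the mode(s) before the colon.  */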
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE or UNKNOWN.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    default:
      code = UNKNOWN;
      break;
    }
  return code;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
  gcc_assert (code != UNKNOWN);
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
		    tree t_op0, tree t_op1, bool unsignedp,
		    enum insn_code icode, unsigned int opno)
{
  class expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand (for left shift
   first operand) is a constant vector of zeros.  Return the shift distance
   in bits if so, or NULL_RTX if the vec_perm is not a shift.  MODE is the
   mode of the value being shifted.  SHIFT_OPTAB is vec_shr_optab for right
   shift or vec_shl_optab for left shift.  */
static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
			     optab shift_optab)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (shift_optab == vec_shl_optab)
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      unsigned firstidx = 0;
      for (unsigned int i = 0; i < nelt; i++)
	{
	  if (known_eq (sel[i], nelt))
	    {
	      if (i == 0 || firstidx)
		return NULL_RTX;
	      firstidx = i;
	    }
	  else if (firstidx
		   ? maybe_ne (sel[i], nelt + i - firstidx)
		   : maybe_ge (sel[i], nelt))
	    return NULL_RTX;
	}

      if (firstidx == 0)
	return NULL_RTX;
      first = firstidx;
    }
  else if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
	return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
	{
	  poly_int64 expected = i + first;
	  /* Indices into the second vector are all equivalent.  */
	  if (maybe_lt (sel[i], nelt)
	      ? maybe_ne (sel[i], expected)
	      : maybe_lt (expected, nelt))
	    return NULL_RTX;
	}
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
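
/* For example (illustrative): with MODE == V4SImode and a zero second
   operand, the selector {1, 2, 3, 4} picks each element one slot up,
   taking the final element from the zero vector; FIRST is 1, the series
   check succeeds, and the function returns a shift of 1 * 32 bits for
   vec_shr_optab.  */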
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  class expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
	      || related_int_vector_mode (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
		       const vec_perm_builder &sel, machine_mode sel_mode,
		       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr or vec_shl.  We only do this
     if the second (for vec_shr) or first (for vec_shl) vector is all
     zeroes.  */
  insn_code shift_code = CODE_FOR_nothing;
  insn_code shift_code_qi = CODE_FOR_nothing;
  optab shift_optab = unknown_optab;
  rtx v2 = v0;
  if (v1 == CONST0_RTX (GET_MODE (v1)))
    shift_optab = vec_shr_optab;
  else if (v0 == CONST0_RTX (GET_MODE (v0)))
    {
      shift_optab = vec_shl_optab;
      v2 = v1;
    }
  if (shift_optab != unknown_optab)
    {
      shift_code = optab_handler (shift_optab, mode);
      shift_code_qi = ((qimode != VOIDmode && qimode != mode)
		       ? optab_handler (shift_optab, qimode)
		       : CODE_FOR_nothing);
    }
  if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
      if (shift_amt)
	{
	  class expand_operand ops[3];
	  if (shift_amt == const0_rtx)
	    return v2;
	  if (shift_code != CODE_FOR_nothing)
	    {
	      create_output_operand (&ops[0], target, mode);
	      create_input_operand (&ops[1], v2, mode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code, 3, ops))
		return ops[0].value;
	    }
	  if (shift_code_qi != CODE_FOR_nothing)
	    {
	      rtx tmp = gen_reg_rtx (qimode);
	      create_output_operand (&ops[0], tmp, qimode);
	      create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
	      if (maybe_expand_insn (shift_code_qi, 3, ops))
		return gen_lowpart (mode, ops[0].value);
	    }
	}
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      if (single_arg_p)
	v1 = v0;

      if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
	return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
	  && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
					       v1_qi, qimode_indices))
	return gen_lowpart (mode, target_qi);
    }

  v0 = force_reg (mode, v0);
  if (single_arg_p)
    v1 = v0;
  else
    v1 = force_reg (mode, v1);

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!related_int_vector_mode (mode).exists (&required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
	return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
	{
	  rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
	  rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
	  if (tmp)
	    return gen_lowpart (mode, tmp);
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}

/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

static rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
			       NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
			       gen_int_shift_amount (selmode, exact_log2 (u)),
			       NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte of each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
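
/* Worked example of the byte-based lowering above (illustrative): for
   V4HImode (u == 2) with element selector { 2, 0, 3, 1 }, doubling
   gives { 4, 0, 6, 2 }, broadcasting the low byte of each selector
   element gives { 4, 4, 0, 0, 6, 6, 2, 2 }, and adding the byte
   offsets { 0, 1, 0, 1, ... } yields { 4, 5, 0, 1, 6, 7, 2, 3 }, which
   moves the same halfwords one byte at a time (memory ordering).  */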

/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  class expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
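
/* For reference (illustrative): a vector series is the linear sequence

     { op0, op0 + op1, op0 + 2 * op1, ... }

   so on a target providing vec_series_optab for V4SImode,
   expand_vec_series_expr (E_V4SImode, GEN_INT (1), GEN_INT (2), NULL_RTX)
   yields the constant series { 1, 3, 5, 7 }.  */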

/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  class expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
				   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}

/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  class expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
				 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
	sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
			+ ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
	sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
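
/* Worked example of the even/odd recombination above (illustrative, not
   from the original sources): on a little-endian V4SImode target using
   method 2, the widening even/odd multiplies produce V2DImode products
   p0/p2 and p1/p3.  Viewed again as V4SImode:

     m1 = { lo(p0), hi(p0), lo(p2), hi(p2) }
     m2 = { lo(p1), hi(p1), lo(p3), hi(p3) }

   and the stepped selector built above expands to { 1, 5, 3, 7 }, which
   picks out { hi(p0), hi(p1), hi(p2), hi(p3) }, the highpart result in
   element order.  */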

/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}

/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap (mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
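
/* Usage sketch (illustrative): expand_atomic_fetch_op below records SEQ
   as an insn sequence computing NEW_REG from OLD_REG, so an atomic
   increment is built roughly as

     rtx old_reg = gen_reg_rtx (mode), new_reg = gen_reg_rtx (mode);
     start_sequence ();
     emit_move_insn (new_reg, expand_simple_binop (mode, PLUS, old_reg,
						   const1_rtx, NULL_RTX,
						   true, OPTAB_LIB_WIDEN));
     rtx_insn *seq = get_insns ();
     end_sequence ();
     expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);

   and the loop re-runs SEQ on each retry with the freshly loaded
   value.  */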

/* This function tries to emit an atomic_exchange instruction.  VAL is
   written to *MEM using memory model MODEL.  The previous contents of
   *MEM are returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}

/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}

/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}

/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  class expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}

/* This function expands the legacy __sync_lock_test_and_set operation
   which is generally an atomic exchange.  Some limited targets only allow
   the constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);

  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					     MEMMODEL_SYNC_ACQUIRE);
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  if (!ret)
    {
      /* If there are no other options, try atomic_test_and_set if the value
	 being stored is 1.  */
      if (val == const1_rtx)
	ret = maybe_emit_atomic_test_and_set (target, mem,
					      MEMMODEL_SYNC_ACQUIRE);
    }

  return ret;
}

/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ...  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx,
					     model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
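
/* Worked example (illustrative): on a hypothetical target whose
   test-and-set pattern stores a trueval of, say, 0x80 rather than 1,
   the code above exchanges 0x80 into MEM and then uses
   emit_store_flag_force to reduce the previous byte to a 0/1 result,
   preserving the boolean contract of __atomic_test_and_set.  */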

/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}

/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (get_last_insn (), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
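
/* Usage sketch (illustrative, not from the original sources): this is
   the expansion path behind __atomic_compare_exchange_n.  A caller that
   only needs the success flag can mark the old value as unused with
   const0_rtx:

     rtx bool_rtx = NULL_RTX, oval_rtx = const0_rtx;
     if (expand_atomic_compare_and_swap (&bool_rtx, &oval_rtx, mem,
					 expected, desired, false,
					 MEMMODEL_SEQ_CST, MEMMODEL_SEQ_CST))
       ... bool_rtx now holds the 0/1 comparison result ...

   A false return means neither a direct pattern nor a libcall was
   available.  */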

/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}

/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}

/* Generate asm volatile("" : : : "memory") as a memory blockage, at the
   same time clobbering the register set specified by REGS.  */

void
expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
{
  rtx asm_op, clob_mem;

  unsigned int num_of_regs = 0;
  for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (regs, i))
      num_of_regs++;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  rtvec v = rtvec_alloc (num_of_regs + 2);

  clob_mem = gen_rtx_SCRATCH (VOIDmode);
  clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
  clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);

  RTVEC_ELT (v, 0) = asm_op;
  RTVEC_ELT (v, 1) = clob_mem;

  if (num_of_regs > 0)
    {
      unsigned int j = 2;
      for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (regs, i))
	  {
	    RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
	    j++;
	  }
      gcc_assert (j == (num_of_regs + 2));
    }

  emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
}

/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}

/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}

/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
	expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (!is_mm_relaxed (model))
	    expand_memory_blockage ();
	  return ops[0].value;
	}
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store
     performed by that operation would be incorrect if this is a volatile
     atomic load or if the memory is mapped read-only.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}

/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fallback.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  class expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
	expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (is_mm_seq_cst (model))
	    expand_memory_blockage ();
	  return const0_rtx;
	}
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
	 we can try an atomic_exchange and throw away the result.  Otherwise,
	 don't do anything so that we do not create an inconsistency between
	 loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
	{
	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
	  if (!target)
	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
								val);
	  if (target)
	    return const0_rtx;
	}
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
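
/* Barrier placement sketch (illustrative): on a target without an
   atomic_store pattern, a seq-cst __atomic_store_n of word size or less
   reduces to roughly

     expand_mem_thread_fence (MEMMODEL_SEQ_CST);   release-side fence
     emit_move_insn (mem, val);                    plain, atomic move
     expand_mem_thread_fence (MEMMODEL_SEQ_CST);   trailing seq-cst fence

   which is only correct because the move itself is a single atomic
   access at this size (the BITS_PER_WORD check above).  */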

/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};

/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
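
/* Usage sketch (illustrative): the expanders below start with

     struct atomic_op_functions optab;
     get_atomic_op_for_code (&optab, PLUS);

   after which optab.mem_fetch_before is atomic_fetch_add_optab,
   optab.reverse_code is MINUS, and so on.  A reverse_code of UNKNOWN
   (AND, IOR, NOT) records that the fetched-before value cannot be
   reconstructed from a fetch-after result by a simple inverse
   operation.  */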

/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
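
/* Rationale for the two rewrites above (illustrative): AND with 0 and
   IOR with -1 have constant absorbing elements, so the value stored no
   longer depends on the previous contents:

     x & 0 == 0     so fetch_and (&x, 0, m)  acts as exchange (&x, 0, m)
     x | -1 == -1   so fetch_or (&x, -1, m)  acts as exchange (&x, -1, m)

   and the exchange's return value is exactly the old *x that the fetch
   operation would have returned.  */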

/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  class expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
	return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
	return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model,
				!after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after == fetch_before OP val.
	     Fetch_before == fetch_after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
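
/* Worked example of the compensation above (illustrative): if a target
   only provides add_fetch (AFTER semantics) but the caller asked for
   fetch_add (AFTER false), the insn yields old + val and applying
   reverse_code (MINUS) recovers

     fetch_before = (old + val) - val = old

   Conversely, an after-value can always be recomputed from a fetched
   before-value; for NAND that is the code == NOT branch, which forms
   ~(before & val).  */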

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  tmp = get_insns ();
	  end_sequence ();
	  emit_insn (tmp);
	  return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    addr, ptr_mode, val, mode);

	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}

/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}

/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i, size;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}

/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (class expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
			 gen_int_mode (intval, MAX_MODE_INT),
			 VOIDmode, false, intval);
}

/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    class expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;
      rtx_insn *last;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
	  && !side_effects_p (addr))
	{
	  machine_mode mode;

	  last = get_last_insn ();
	  mode = get_address_mode (mem);
	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
	  if (insn_operand_matches (icode, opno, mem))
	    {
	      op->value = mem;
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  return false;
}

/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  class expand_operand *op)
{
  machine_mode mode, imode, tmode;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      {
	temporary_volatile_ok v (true);
	return maybe_legitimize_operand_same_code (icode, opno, op);
      }

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
	       ? GET_MODE_INNER (imode) : imode);
      if (tmode != VOIDmode && tmode != mode)
	{
	  op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
	  mode = tmode;
	}
      if (imode != VOIDmode && imode != mode)
	{
	  gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
	  op->value = expand_vector_broadcast (imode, op->value);
	  if (op->value == NULL)
	    return false;
	}
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
					  op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
	  && known_eq (trunc_int_for_mode (op->int_value, mode),
		       op->int_value))
	{
	  op->value = gen_int_mode (op->int_value, mode);
	  goto input;
	}
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}

/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (class expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}

/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static inline bool
can_reuse_operands_p (enum insn_code icode,
		      unsigned int opno1, unsigned int opno2,
		      const class expand_operand *op1,
		      const class expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
	  != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
      /* Outputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
			   unsigned int nops, class expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
	 This avoids duplicate rtl and ensures that tied operands
	 remain tied.

	 This search is linear, but NOPS is bounded at compile time
	 to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
	if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
	    && rtx_equal_p (orig_values[j], orig_values[i])
	    && ops[j].value
	    && insn_operand_matches (icode, opno + i, ops[j].value))
	  {
	    ops[i].value = copy_rtx (ops[j].value);
	    break;
	  }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
	{
	  delete_insns_since (last);
	  return false;
	}
    }
  return true;
}

/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		class expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}

/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
		   class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
			class expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
	     class expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
		  class expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}