1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
34 #include "diagnostic-core.h"
36 /* Include insn-config.h before expr.h so that HAVE_conditional_move
37 is properly defined. */
38 #include "stor-layout.h"
43 #include "optabs-tree.h"
46 static void prepare_float_lib_cmp (rtx
, rtx
, enum rtx_code
, rtx
*,
48 static rtx
expand_unop_direct (machine_mode
, optab
, rtx
, rtx
, int);
49 static void emit_libcall_block_1 (rtx_insn
*, rtx
, rtx
, rtx
, bool);
51 /* Debug facility for use in GDB. */
52 void debug_optab_libfuncs (void);
54 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
55 the result of operation CODE applied to OP0 (and OP1 if it is a binary
58 If the last insn does not set TARGET, don't do anything, but return 1.
60 If the last insn or a previous insn sets TARGET and TARGET is one of OP0
61 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then
62 try again, ensuring that TARGET is not one of the operands. */
65 add_equal_note (rtx_insn
*insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
71 gcc_assert (insns
&& INSN_P (insns
) && NEXT_INSN (insns
));
73 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
74 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
75 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
76 && GET_RTX_CLASS (code
) != RTX_COMPARE
77 && GET_RTX_CLASS (code
) != RTX_UNARY
)
80 if (GET_CODE (target
) == ZERO_EXTRACT
)
83 for (last_insn
= insns
;
84 NEXT_INSN (last_insn
) != NULL_RTX
;
85 last_insn
= NEXT_INSN (last_insn
))
88 /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing
89 a value changing in the insn, so the note would be invalid for CSE. */
90 if (reg_overlap_mentioned_p (target
, op0
)
91 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
94 && (rtx_equal_p (target
, op0
)
95 || (op1
&& rtx_equal_p (target
, op1
))))
97 /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
98 over expanding it as temp = MEM op X, MEM = temp. If the target
99 supports MEM = MEM op X instructions, it is sometimes too hard
100 to reconstruct that form later, especially if X is also a memory,
101 and due to multiple occurrences of addresses the address might
102 be forced into register unnecessarily.
103 Note that not emitting the REG_EQUIV note might inhibit
104 CSE in some cases. */
105 set
= single_set (last_insn
);
107 && GET_CODE (SET_SRC (set
)) == code
108 && MEM_P (SET_DEST (set
))
109 && (rtx_equal_p (SET_DEST (set
), XEXP (SET_SRC (set
), 0))
110 || (op1
&& rtx_equal_p (SET_DEST (set
),
111 XEXP (SET_SRC (set
), 1)))))
117 set
= set_for_reg_notes (last_insn
);
121 if (! rtx_equal_p (SET_DEST (set
), target
)
122 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
123 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
124 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
127 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
137 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (target
) != GET_MODE (op0
))
139 note
= gen_rtx_fmt_e (code
, GET_MODE (op0
), copy_rtx (op0
));
140 if (GET_MODE_SIZE (GET_MODE (op0
))
141 > GET_MODE_SIZE (GET_MODE (target
)))
142 note
= simplify_gen_unary (TRUNCATE
, GET_MODE (target
),
143 note
, GET_MODE (op0
));
145 note
= simplify_gen_unary (ZERO_EXTEND
, GET_MODE (target
),
146 note
, GET_MODE (op0
));
151 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
155 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
157 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
162 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
163 for a widening operation would be. In most cases this would be OP0, but if
164 that's a constant it'll be VOIDmode, which isn't useful. */
167 widened_mode (machine_mode to_mode
, rtx op0
, rtx op1
)
169 machine_mode m0
= GET_MODE (op0
);
170 machine_mode m1
= GET_MODE (op1
);
173 if (m0
== VOIDmode
&& m1
== VOIDmode
)
175 else if (m0
== VOIDmode
|| GET_MODE_SIZE (m0
) < GET_MODE_SIZE (m1
))
180 if (GET_MODE_SIZE (result
) > GET_MODE_SIZE (to_mode
))
186 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
187 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
188 not actually do a sign-extend or zero-extend, but can leave the
189 higher-order bits of the result rtx undefined, for example, in the case
190 of logical operations, but not right shifts. */
193 widen_operand (rtx op
, machine_mode mode
, machine_mode oldmode
,
194 int unsignedp
, int no_extend
)
198 /* If we don't have to extend and this is a constant, return it. */
199 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
202 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
203 extend since it will be more efficient to do so unless the signedness of
204 a promoted object differs from our extension. */
206 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
207 && SUBREG_CHECK_PROMOTED_SIGN (op
, unsignedp
)))
208 return convert_modes (mode
, oldmode
, op
, unsignedp
);
210 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
212 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
213 return gen_lowpart (mode
, force_reg (GET_MODE (op
), op
));
215 /* Otherwise, get an object of MODE, clobber it, and set the low-order
218 result
= gen_reg_rtx (mode
);
219 emit_clobber (result
);
220 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
224 /* Expand vector widening operations.
226 There are two different classes of operations handled here:
227 1) Operations whose result is wider than all the arguments to the operation.
228 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
229 In this case OP0 and optionally OP1 would be initialized,
230 but WIDE_OP wouldn't (not relevant for this case).
231 2) Operations whose result is of the same size as the last argument to the
232 operation, but wider than all the other arguments to the operation.
233 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
234 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
236 E.g, when called to expand the following operations, this is how
237 the arguments will be initialized:
239 widening-sum 2 oprnd0 - oprnd1
240 widening-dot-product 3 oprnd0 oprnd1 oprnd2
241 widening-mult 2 oprnd0 oprnd1 -
242 type-promotion (vec-unpack) 1 oprnd0 - - */
245 expand_widen_pattern_expr (sepops ops
, rtx op0
, rtx op1
, rtx wide_op
,
246 rtx target
, int unsignedp
)
248 struct expand_operand eops
[4];
249 tree oprnd0
, oprnd1
, oprnd2
;
250 machine_mode wmode
= VOIDmode
, tmode0
, tmode1
= VOIDmode
;
251 optab widen_pattern_optab
;
252 enum insn_code icode
;
253 int nops
= TREE_CODE_LENGTH (ops
->code
);
257 tmode0
= TYPE_MODE (TREE_TYPE (oprnd0
));
258 widen_pattern_optab
=
259 optab_for_tree_code (ops
->code
, TREE_TYPE (oprnd0
), optab_default
);
260 if (ops
->code
== WIDEN_MULT_PLUS_EXPR
261 || ops
->code
== WIDEN_MULT_MINUS_EXPR
)
262 icode
= find_widening_optab_handler (widen_pattern_optab
,
263 TYPE_MODE (TREE_TYPE (ops
->op2
)),
266 icode
= optab_handler (widen_pattern_optab
, tmode0
);
267 gcc_assert (icode
!= CODE_FOR_nothing
);
272 tmode1
= TYPE_MODE (TREE_TYPE (oprnd1
));
275 /* The last operand is of a wider mode than the rest of the operands. */
280 gcc_assert (tmode1
== tmode0
);
283 wmode
= TYPE_MODE (TREE_TYPE (oprnd2
));
287 create_output_operand (&eops
[op
++], target
, TYPE_MODE (ops
->type
));
288 create_convert_operand_from (&eops
[op
++], op0
, tmode0
, unsignedp
);
290 create_convert_operand_from (&eops
[op
++], op1
, tmode1
, unsignedp
);
292 create_convert_operand_from (&eops
[op
++], wide_op
, wmode
, unsignedp
);
293 expand_insn (icode
, op
, eops
);
294 return eops
[0].value
;
297 /* Generate code to perform an operation specified by TERNARY_OPTAB
298 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
300 UNSIGNEDP is for the case where we have to widen the operands
301 to perform the operation. It says to use zero-extension.
303 If TARGET is nonzero, the value
304 is generated there, if it is convenient to do so.
305 In all cases an rtx is returned for the locus of the value;
306 this may or may not be TARGET. */
309 expand_ternary_op (machine_mode mode
, optab ternary_optab
, rtx op0
,
310 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
312 struct expand_operand ops
[4];
313 enum insn_code icode
= optab_handler (ternary_optab
, mode
);
315 gcc_assert (optab_handler (ternary_optab
, mode
) != CODE_FOR_nothing
);
317 create_output_operand (&ops
[0], target
, mode
);
318 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
319 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
320 create_convert_operand_from (&ops
[3], op2
, mode
, unsignedp
);
321 expand_insn (icode
, 4, ops
);
326 /* Like expand_binop, but return a constant rtx if the result can be
327 calculated at compile time. The arguments and return value are
328 otherwise the same as for expand_binop. */
331 simplify_expand_binop (machine_mode mode
, optab binoptab
,
332 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
333 enum optab_methods methods
)
335 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
337 rtx x
= simplify_binary_operation (optab_to_code (binoptab
),
343 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
346 /* Like simplify_expand_binop, but always put the result in TARGET.
347 Return true if the expansion succeeded. */
350 force_expand_binop (machine_mode mode
, optab binoptab
,
351 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
352 enum optab_methods methods
)
354 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
355 target
, unsignedp
, methods
);
359 emit_move_insn (target
, x
);
363 /* Create a new vector value in VMODE with all elements set to OP. The
364 mode of OP must be the element mode of VMODE. If OP is a constant,
365 then the return value will be a constant. */
368 expand_vector_broadcast (machine_mode vmode
, rtx op
)
370 enum insn_code icode
;
375 gcc_checking_assert (VECTOR_MODE_P (vmode
));
377 n
= GET_MODE_NUNITS (vmode
);
378 vec
= rtvec_alloc (n
);
379 for (i
= 0; i
< n
; ++i
)
380 RTVEC_ELT (vec
, i
) = op
;
383 return gen_rtx_CONST_VECTOR (vmode
, vec
);
385 /* ??? If the target doesn't have a vec_init, then we have no easy way
386 of performing this operation. Most of this sort of generic support
387 is hidden away in the vector lowering support in gimple. */
388 icode
= optab_handler (vec_init_optab
, vmode
);
389 if (icode
== CODE_FOR_nothing
)
392 ret
= gen_reg_rtx (vmode
);
393 emit_insn (GEN_FCN (icode
) (ret
, gen_rtx_PARALLEL (vmode
, vec
)));
398 /* This subroutine of expand_doubleword_shift handles the cases in which
399 the effective shift value is >= BITS_PER_WORD. The arguments and return
400 value are the same as for the parent routine, except that SUPERWORD_OP1
401 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
402 INTO_TARGET may be null if the caller has decided to calculate it. */
405 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
406 rtx outof_target
, rtx into_target
,
407 int unsignedp
, enum optab_methods methods
)
409 if (into_target
!= 0)
410 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
411 into_target
, unsignedp
, methods
))
414 if (outof_target
!= 0)
416 /* For a signed right shift, we must fill OUTOF_TARGET with copies
417 of the sign bit, otherwise we must fill it with zeros. */
418 if (binoptab
!= ashr_optab
)
419 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
421 if (!force_expand_binop (word_mode
, binoptab
,
422 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
423 outof_target
, unsignedp
, methods
))
429 /* This subroutine of expand_doubleword_shift handles the cases in which
430 the effective shift value is < BITS_PER_WORD. The arguments and return
431 value are the same as for the parent routine. */
434 expand_subword_shift (machine_mode op1_mode
, optab binoptab
,
435 rtx outof_input
, rtx into_input
, rtx op1
,
436 rtx outof_target
, rtx into_target
,
437 int unsignedp
, enum optab_methods methods
,
438 unsigned HOST_WIDE_INT shift_mask
)
440 optab reverse_unsigned_shift
, unsigned_shift
;
443 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
444 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
446 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
447 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
448 the opposite direction to BINOPTAB. */
449 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
451 carries
= outof_input
;
452 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
,
453 op1_mode
), op1_mode
);
454 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
459 /* We must avoid shifting by BITS_PER_WORD bits since that is either
460 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
461 has unknown behavior. Do a single shift first, then shift by the
462 remainder. It's OK to use ~OP1 as the remainder if shift counts
463 are truncated to the mode size. */
464 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
465 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
466 if (shift_mask
== BITS_PER_WORD
- 1)
468 tmp
= immed_wide_int_const
469 (wi::minus_one (GET_MODE_PRECISION (op1_mode
)), op1_mode
);
470 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
475 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
- 1,
476 op1_mode
), op1_mode
);
477 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
481 if (tmp
== 0 || carries
== 0)
483 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
484 carries
, tmp
, 0, unsignedp
, methods
);
488 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
489 so the result can go directly into INTO_TARGET if convenient. */
490 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
491 into_target
, unsignedp
, methods
);
495 /* Now OR in the bits carried over from OUTOF_INPUT. */
496 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
497 into_target
, unsignedp
, methods
))
500 /* Use a standard word_mode shift for the out-of half. */
501 if (outof_target
!= 0)
502 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
503 outof_target
, unsignedp
, methods
))
510 /* Try implementing expand_doubleword_shift using conditional moves.
511 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
512 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
513 are the shift counts to use in the former and latter case. All other
514 arguments are the same as the parent routine. */
517 expand_doubleword_shift_condmove (machine_mode op1_mode
, optab binoptab
,
518 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
519 rtx outof_input
, rtx into_input
,
520 rtx subword_op1
, rtx superword_op1
,
521 rtx outof_target
, rtx into_target
,
522 int unsignedp
, enum optab_methods methods
,
523 unsigned HOST_WIDE_INT shift_mask
)
525 rtx outof_superword
, into_superword
;
527 /* Put the superword version of the output into OUTOF_SUPERWORD and
529 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
530 if (outof_target
!= 0 && subword_op1
== superword_op1
)
532 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
533 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
534 into_superword
= outof_target
;
535 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
536 outof_superword
, 0, unsignedp
, methods
))
541 into_superword
= gen_reg_rtx (word_mode
);
542 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
543 outof_superword
, into_superword
,
548 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
549 if (!expand_subword_shift (op1_mode
, binoptab
,
550 outof_input
, into_input
, subword_op1
,
551 outof_target
, into_target
,
552 unsignedp
, methods
, shift_mask
))
555 /* Select between them. Do the INTO half first because INTO_SUPERWORD
556 might be the current value of OUTOF_TARGET. */
557 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
558 into_target
, into_superword
, word_mode
, false))
561 if (outof_target
!= 0)
562 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
563 outof_target
, outof_superword
,
570 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
571 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
572 input operand; the shift moves bits in the direction OUTOF_INPUT->
573 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
574 of the target. OP1 is the shift count and OP1_MODE is its mode.
575 If OP1 is constant, it will have been truncated as appropriate
576 and is known to be nonzero.
578 If SHIFT_MASK is zero, the result of word shifts is undefined when the
579 shift count is outside the range [0, BITS_PER_WORD). This routine must
580 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
582 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
583 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
584 fill with zeros or sign bits as appropriate.
586 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
587 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
588 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
589 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
592 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
593 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
594 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
595 function wants to calculate it itself.
597 Return true if the shift could be successfully synthesized. */
600 expand_doubleword_shift (machine_mode op1_mode
, optab binoptab
,
601 rtx outof_input
, rtx into_input
, rtx op1
,
602 rtx outof_target
, rtx into_target
,
603 int unsignedp
, enum optab_methods methods
,
604 unsigned HOST_WIDE_INT shift_mask
)
606 rtx superword_op1
, tmp
, cmp1
, cmp2
;
607 enum rtx_code cmp_code
;
609 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
610 fill the result with sign or zero bits as appropriate. If so, the value
611 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
612 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
613 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
615 This isn't worthwhile for constant shifts since the optimizers will
616 cope better with in-range shift counts. */
617 if (shift_mask
>= BITS_PER_WORD
619 && !CONSTANT_P (op1
))
621 if (!expand_doubleword_shift (op1_mode
, binoptab
,
622 outof_input
, into_input
, op1
,
624 unsignedp
, methods
, shift_mask
))
626 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
627 outof_target
, unsignedp
, methods
))
632 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
633 is true when the effective shift value is less than BITS_PER_WORD.
634 Set SUPERWORD_OP1 to the shift count that should be used to shift
635 OUTOF_INPUT into INTO_TARGET when the condition is false. */
636 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
, op1_mode
), op1_mode
);
637 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
639 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
640 is a subword shift count. */
641 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
643 cmp2
= CONST0_RTX (op1_mode
);
649 /* Set CMP1 to OP1 - BITS_PER_WORD. */
650 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
652 cmp2
= CONST0_RTX (op1_mode
);
654 superword_op1
= cmp1
;
659 /* If we can compute the condition at compile time, pick the
660 appropriate subroutine. */
661 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
662 if (tmp
!= 0 && CONST_INT_P (tmp
))
664 if (tmp
== const0_rtx
)
665 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
666 outof_target
, into_target
,
669 return expand_subword_shift (op1_mode
, binoptab
,
670 outof_input
, into_input
, op1
,
671 outof_target
, into_target
,
672 unsignedp
, methods
, shift_mask
);
675 /* Try using conditional moves to generate straight-line code. */
676 if (HAVE_conditional_move
)
678 rtx_insn
*start
= get_last_insn ();
679 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
680 cmp_code
, cmp1
, cmp2
,
681 outof_input
, into_input
,
683 outof_target
, into_target
,
684 unsignedp
, methods
, shift_mask
))
686 delete_insns_since (start
);
689 /* As a last resort, use branches to select the correct alternative. */
690 rtx_code_label
*subword_label
= gen_label_rtx ();
691 rtx_code_label
*done_label
= gen_label_rtx ();
694 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
695 0, 0, subword_label
, -1);
698 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
699 outof_target
, into_target
,
703 emit_jump_insn (targetm
.gen_jump (done_label
));
705 emit_label (subword_label
);
707 if (!expand_subword_shift (op1_mode
, binoptab
,
708 outof_input
, into_input
, op1
,
709 outof_target
, into_target
,
710 unsignedp
, methods
, shift_mask
))
713 emit_label (done_label
);
717 /* Subroutine of expand_binop. Perform a double word multiplication of
718 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
719 as the target's word_mode. This function return NULL_RTX if anything
720 goes wrong, in which case it may have already emitted instructions
721 which need to be deleted.
723 If we want to multiply two two-word values and have normal and widening
724 multiplies of single-word values, we can do this with three smaller
727 The multiplication proceeds as follows:
728 _______________________
729 [__op0_high_|__op0_low__]
730 _______________________
731 * [__op1_high_|__op1_low__]
732 _______________________________________________
733 _______________________
734 (1) [__op0_low__*__op1_low__]
735 _______________________
736 (2a) [__op0_low__*__op1_high_]
737 _______________________
738 (2b) [__op0_high_*__op1_low__]
739 _______________________
740 (3) [__op0_high_*__op1_high_]
743 This gives a 4-word result. Since we are only interested in the
744 lower 2 words, partial result (3) and the upper words of (2a) and
745 (2b) don't need to be calculated. Hence (2a) and (2b) can be
746 calculated using non-widening multiplication.
748 (1), however, needs to be calculated with an unsigned widening
749 multiplication. If this operation is not directly supported we
750 try using a signed widening multiplication and adjust the result.
751 This adjustment works as follows:
753 If both operands are positive then no adjustment is needed.
755 If the operands have different signs, for example op0_low < 0 and
756 op1_low >= 0, the instruction treats the most significant bit of
757 op0_low as a sign bit instead of a bit with significance
758 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
759 with 2**BITS_PER_WORD - op0_low, and two's complements the
760 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
763 Similarly, if both operands are negative, we need to add
764 (op0_low + op1_low) * 2**BITS_PER_WORD.
766 We use a trick to adjust quickly. We logically shift op0_low right
767 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
768 op0_high (op1_high) before it is used to calculate 2b (2a). If no
769 logical shift exists, we do an arithmetic right shift and subtract
773 expand_doubleword_mult (machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
774 bool umulp
, enum optab_methods methods
)
776 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
777 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
778 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
779 rtx product
, adjust
, product_high
, temp
;
781 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
782 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
783 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
784 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
786 /* If we're using an unsigned multiply to directly compute the product
787 of the low-order words of the operands and perform any required
788 adjustments of the operands, we begin by trying two more multiplications
789 and then computing the appropriate sum.
791 We have checked above that the required addition is provided.
792 Full-word addition will normally always succeed, especially if
793 it is provided at all, so we don't worry about its failure. The
794 multiplication may well fail, however, so we do handle that. */
798 /* ??? This could be done with emit_store_flag where available. */
799 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
800 NULL_RTX
, 1, methods
);
802 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
803 NULL_RTX
, 0, OPTAB_DIRECT
);
806 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
807 NULL_RTX
, 0, methods
);
810 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
811 NULL_RTX
, 0, OPTAB_DIRECT
);
818 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
819 NULL_RTX
, 0, OPTAB_DIRECT
);
823 /* OP0_HIGH should now be dead. */
827 /* ??? This could be done with emit_store_flag where available. */
828 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
829 NULL_RTX
, 1, methods
);
831 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
832 NULL_RTX
, 0, OPTAB_DIRECT
);
835 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
836 NULL_RTX
, 0, methods
);
839 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
840 NULL_RTX
, 0, OPTAB_DIRECT
);
847 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
848 NULL_RTX
, 0, OPTAB_DIRECT
);
852 /* OP1_HIGH should now be dead. */
854 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
855 NULL_RTX
, 0, OPTAB_DIRECT
);
857 if (target
&& !REG_P (target
))
861 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
862 target
, 1, OPTAB_DIRECT
);
864 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
865 target
, 1, OPTAB_DIRECT
);
870 product_high
= operand_subword (product
, high
, 1, mode
);
871 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
872 NULL_RTX
, 0, OPTAB_DIRECT
);
873 emit_move_insn (product_high
, adjust
);
877 /* Wrapper around expand_binop which takes an rtx code to specify
878 the operation to perform, not an optab pointer. All other
879 arguments are the same. */
881 expand_simple_binop (machine_mode mode
, enum rtx_code code
, rtx op0
,
882 rtx op1
, rtx target
, int unsignedp
,
883 enum optab_methods methods
)
885 optab binop
= code_to_optab (code
);
888 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
891 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
892 binop. Order them according to commutative_operand_precedence and, if
893 possible, try to put TARGET or a pseudo first. */
895 swap_commutative_operands_with_target (rtx target
, rtx op0
, rtx op1
)
897 int op0_prec
= commutative_operand_precedence (op0
);
898 int op1_prec
= commutative_operand_precedence (op1
);
900 if (op0_prec
< op1_prec
)
903 if (op0_prec
> op1_prec
)
906 /* With equal precedence, both orders are ok, but it is better if the
907 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
908 if (target
== 0 || REG_P (target
))
909 return (REG_P (op1
) && !REG_P (op0
)) || target
== op1
;
911 return rtx_equal_p (op1
, target
);
914 /* Return true if BINOPTAB implements a shift operation. */
917 shift_optab_p (optab binoptab
)
919 switch (optab_to_code (binoptab
))
935 /* Return true if BINOPTAB implements a commutative binary operation. */
938 commutative_optab_p (optab binoptab
)
940 return (GET_RTX_CLASS (optab_to_code (binoptab
)) == RTX_COMM_ARITH
941 || binoptab
== smul_widen_optab
942 || binoptab
== umul_widen_optab
943 || binoptab
== smul_highpart_optab
944 || binoptab
== umul_highpart_optab
);
947 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
948 optimizing, and if the operand is a constant that costs more than
949 1 instruction, force the constant into a register and return that
950 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
953 avoid_expensive_constant (machine_mode mode
, optab binoptab
,
954 int opn
, rtx x
, bool unsignedp
)
956 bool speed
= optimize_insn_for_speed_p ();
961 && (rtx_cost (x
, mode
, optab_to_code (binoptab
), opn
, speed
)
962 > set_src_cost (x
, mode
, speed
)))
966 HOST_WIDE_INT intval
= trunc_int_for_mode (INTVAL (x
), mode
);
967 if (intval
!= INTVAL (x
))
968 x
= GEN_INT (intval
);
971 x
= convert_modes (mode
, VOIDmode
, x
, unsignedp
);
972 x
= force_reg (mode
, x
);
977 /* Helper function for expand_binop: handle the case where there
978 is an insn that directly implements the indicated operation.
979 Returns null if this is not possible. */
981 expand_binop_directly (machine_mode mode
, optab binoptab
,
983 rtx target
, int unsignedp
, enum optab_methods methods
,
986 machine_mode from_mode
= widened_mode (mode
, op0
, op1
);
987 enum insn_code icode
= find_widening_optab_handler (binoptab
, mode
,
989 machine_mode xmode0
= insn_data
[(int) icode
].operand
[1].mode
;
990 machine_mode xmode1
= insn_data
[(int) icode
].operand
[2].mode
;
991 machine_mode mode0
, mode1
, tmp_mode
;
992 struct expand_operand ops
[3];
995 rtx xop0
= op0
, xop1
= op1
;
996 bool canonicalize_op1
= false;
998 /* If it is a commutative operator and the modes would match
999 if we would swap the operands, we can save the conversions. */
1000 commutative_p
= commutative_optab_p (binoptab
);
1002 && GET_MODE (xop0
) != xmode0
&& GET_MODE (xop1
) != xmode1
1003 && GET_MODE (xop0
) == xmode1
&& GET_MODE (xop1
) == xmode1
)
1004 std::swap (xop0
, xop1
);
1006 /* If we are optimizing, force expensive constants into a register. */
1007 xop0
= avoid_expensive_constant (xmode0
, binoptab
, 0, xop0
, unsignedp
);
1008 if (!shift_optab_p (binoptab
))
1009 xop1
= avoid_expensive_constant (xmode1
, binoptab
, 1, xop1
, unsignedp
);
1011 /* Shifts and rotates often use a different mode for op1 from op0;
1012 for VOIDmode constants we don't know the mode, so force it
1013 to be canonicalized using convert_modes. */
1014 canonicalize_op1
= true;
1016 /* In case the insn wants input operands in modes different from
1017 those of the actual operands, convert the operands. It would
1018 seem that we don't need to convert CONST_INTs, but we do, so
1019 that they're properly zero-extended, sign-extended or truncated
1022 mode0
= GET_MODE (xop0
) != VOIDmode
? GET_MODE (xop0
) : mode
;
1023 if (xmode0
!= VOIDmode
&& xmode0
!= mode0
)
1025 xop0
= convert_modes (xmode0
, mode0
, xop0
, unsignedp
);
1029 mode1
= ((GET_MODE (xop1
) != VOIDmode
|| canonicalize_op1
)
1030 ? GET_MODE (xop1
) : mode
);
1031 if (xmode1
!= VOIDmode
&& xmode1
!= mode1
)
1033 xop1
= convert_modes (xmode1
, mode1
, xop1
, unsignedp
);
1037 /* If operation is commutative,
1038 try to make the first operand a register.
1039 Even better, try to make it the same as the target.
1040 Also try to make the last operand a constant. */
1042 && swap_commutative_operands_with_target (target
, xop0
, xop1
))
1043 std::swap (xop0
, xop1
);
1045 /* Now, if insn's predicates don't allow our operands, put them into
1048 if (binoptab
== vec_pack_trunc_optab
1049 || binoptab
== vec_pack_usat_optab
1050 || binoptab
== vec_pack_ssat_optab
1051 || binoptab
== vec_pack_ufix_trunc_optab
1052 || binoptab
== vec_pack_sfix_trunc_optab
)
1054 /* The mode of the result is different then the mode of the
1056 tmp_mode
= insn_data
[(int) icode
].operand
[0].mode
;
1057 if (VECTOR_MODE_P (mode
)
1058 && GET_MODE_NUNITS (tmp_mode
) != 2 * GET_MODE_NUNITS (mode
))
1060 delete_insns_since (last
);
1067 create_output_operand (&ops
[0], target
, tmp_mode
);
1068 create_input_operand (&ops
[1], xop0
, mode0
);
1069 create_input_operand (&ops
[2], xop1
, mode1
);
1070 pat
= maybe_gen_insn (icode
, 3, ops
);
1073 /* If PAT is composed of more than one insn, try to add an appropriate
1074 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1075 operand, call expand_binop again, this time without a target. */
1076 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1077 && ! add_equal_note (pat
, ops
[0].value
,
1078 optab_to_code (binoptab
),
1079 ops
[1].value
, ops
[2].value
))
1081 delete_insns_since (last
);
1082 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1083 unsignedp
, methods
);
1087 return ops
[0].value
;
1089 delete_insns_since (last
);
1093 /* Generate code to perform an operation specified by BINOPTAB
1094 on operands OP0 and OP1, with result having machine-mode MODE.
1096 UNSIGNEDP is for the case where we have to widen the operands
1097 to perform the operation. It says to use zero-extension.
1099 If TARGET is nonzero, the value
1100 is generated there, if it is convenient to do so.
1101 In all cases an rtx is returned for the locus of the value;
1102 this may or may not be TARGET. */
1105 expand_binop (machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1106 rtx target
, int unsignedp
, enum optab_methods methods
)
1108 enum optab_methods next_methods
1109 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1110 ? OPTAB_WIDEN
: methods
);
1111 enum mode_class mclass
;
1112 machine_mode wider_mode
;
1115 rtx_insn
*entry_last
= get_last_insn ();
1118 mclass
= GET_MODE_CLASS (mode
);
1120 /* If subtracting an integer constant, convert this into an addition of
1121 the negated constant. */
1123 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1125 op1
= negate_rtx (mode
, op1
);
1126 binoptab
= add_optab
;
1128 /* For shifts, constant invalid op1 might be expanded from different
1129 mode than MODE. As those are invalid, force them to a register
1130 to avoid further problems during expansion. */
1131 else if (CONST_INT_P (op1
)
1132 && shift_optab_p (binoptab
)
1133 && UINTVAL (op1
) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode
)))
1135 op1
= gen_int_mode (INTVAL (op1
), GET_MODE_INNER (mode
));
1136 op1
= force_reg (GET_MODE_INNER (mode
), op1
);
1139 /* Record where to delete back to if we backtrack. */
1140 last
= get_last_insn ();
1142 /* If we can do it with a three-operand insn, do so. */
1144 if (methods
!= OPTAB_MUST_WIDEN
1145 && find_widening_optab_handler (binoptab
, mode
,
1146 widened_mode (mode
, op0
, op1
), 1)
1147 != CODE_FOR_nothing
)
1149 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1150 unsignedp
, methods
, last
);
1155 /* If we were trying to rotate, and that didn't work, try rotating
1156 the other direction before falling back to shifts and bitwise-or. */
1157 if (((binoptab
== rotl_optab
1158 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1159 || (binoptab
== rotr_optab
1160 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1161 && mclass
== MODE_INT
)
1163 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1165 unsigned int bits
= GET_MODE_PRECISION (mode
);
1167 if (CONST_INT_P (op1
))
1168 newop1
= GEN_INT (bits
- INTVAL (op1
));
1169 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1170 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1172 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1173 gen_int_mode (bits
, GET_MODE (op1
)), op1
,
1174 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1176 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1177 target
, unsignedp
, methods
, last
);
1182 /* If this is a multiply, see if we can do a widening operation that
1183 takes operands of this mode and makes a wider mode. */
1185 if (binoptab
== smul_optab
1186 && GET_MODE_2XWIDER_MODE (mode
) != VOIDmode
1187 && (widening_optab_handler ((unsignedp
? umul_widen_optab
1188 : smul_widen_optab
),
1189 GET_MODE_2XWIDER_MODE (mode
), mode
)
1190 != CODE_FOR_nothing
))
1192 temp
= expand_binop (GET_MODE_2XWIDER_MODE (mode
),
1193 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1194 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1198 if (GET_MODE_CLASS (mode
) == MODE_INT
1199 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (temp
)))
1200 return gen_lowpart (mode
, temp
);
1202 return convert_to_mode (mode
, temp
, unsignedp
);
1206 /* If this is a vector shift by a scalar, see if we can do a vector
1207 shift by a vector. If so, broadcast the scalar into a vector. */
1208 if (mclass
== MODE_VECTOR_INT
)
1210 optab otheroptab
= unknown_optab
;
1212 if (binoptab
== ashl_optab
)
1213 otheroptab
= vashl_optab
;
1214 else if (binoptab
== ashr_optab
)
1215 otheroptab
= vashr_optab
;
1216 else if (binoptab
== lshr_optab
)
1217 otheroptab
= vlshr_optab
;
1218 else if (binoptab
== rotl_optab
)
1219 otheroptab
= vrotl_optab
;
1220 else if (binoptab
== rotr_optab
)
1221 otheroptab
= vrotr_optab
;
1223 if (otheroptab
&& optab_handler (otheroptab
, mode
) != CODE_FOR_nothing
)
1225 /* The scalar may have been extended to be too wide. Truncate
1226 it back to the proper size to fit in the broadcast vector. */
1227 machine_mode inner_mode
= GET_MODE_INNER (mode
);
1228 if (!CONST_INT_P (op1
)
1229 && (GET_MODE_BITSIZE (inner_mode
)
1230 < GET_MODE_BITSIZE (GET_MODE (op1
))))
1231 op1
= force_reg (inner_mode
,
1232 simplify_gen_unary (TRUNCATE
, inner_mode
, op1
,
1234 rtx vop1
= expand_vector_broadcast (mode
, op1
);
1237 temp
= expand_binop_directly (mode
, otheroptab
, op0
, vop1
,
1238 target
, unsignedp
, methods
, last
);
1245 /* Look for a wider mode of the same class for which we think we
1246 can open-code the operation. Check for a widening multiply at the
1247 wider mode as well. */
1249 if (CLASS_HAS_WIDER_MODES_P (mclass
)
1250 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1251 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1252 wider_mode
!= VOIDmode
;
1253 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1255 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
1256 || (binoptab
== smul_optab
1257 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1258 && (find_widening_optab_handler ((unsignedp
1260 : smul_widen_optab
),
1261 GET_MODE_WIDER_MODE (wider_mode
),
1263 != CODE_FOR_nothing
)))
1265 rtx xop0
= op0
, xop1
= op1
;
1268 /* For certain integer operations, we need not actually extend
1269 the narrow operands, as long as we will truncate
1270 the results to the same narrowness. */
1272 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1273 || binoptab
== xor_optab
1274 || binoptab
== add_optab
|| binoptab
== sub_optab
1275 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1276 && mclass
== MODE_INT
)
1279 xop0
= avoid_expensive_constant (mode
, binoptab
, 0,
1281 if (binoptab
!= ashl_optab
)
1282 xop1
= avoid_expensive_constant (mode
, binoptab
, 1,
1286 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1288 /* The second operand of a shift must always be extended. */
1289 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1290 no_extend
&& binoptab
!= ashl_optab
);
1292 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1293 unsignedp
, OPTAB_DIRECT
);
1296 if (mclass
!= MODE_INT
1297 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1300 target
= gen_reg_rtx (mode
);
1301 convert_move (target
, temp
, 0);
1305 return gen_lowpart (mode
, temp
);
1308 delete_insns_since (last
);
1312 /* If operation is commutative,
1313 try to make the first operand a register.
1314 Even better, try to make it the same as the target.
1315 Also try to make the last operand a constant. */
1316 if (commutative_optab_p (binoptab
)
1317 && swap_commutative_operands_with_target (target
, op0
, op1
))
1318 std::swap (op0
, op1
);
1320 /* These can be done a word at a time. */
1321 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1322 && mclass
== MODE_INT
1323 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1324 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1329 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1330 won't be accurate, so use a new target. */
1334 || !valid_multiword_target_p (target
))
1335 target
= gen_reg_rtx (mode
);
1339 /* Do the actual arithmetic. */
1340 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1342 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1343 rtx x
= expand_binop (word_mode
, binoptab
,
1344 operand_subword_force (op0
, i
, mode
),
1345 operand_subword_force (op1
, i
, mode
),
1346 target_piece
, unsignedp
, next_methods
);
1351 if (target_piece
!= x
)
1352 emit_move_insn (target_piece
, x
);
1355 insns
= get_insns ();
1358 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1365 /* Synthesize double word shifts from single word shifts. */
1366 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1367 || binoptab
== ashr_optab
)
1368 && mclass
== MODE_INT
1369 && (CONST_INT_P (op1
) || optimize_insn_for_speed_p ())
1370 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1371 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
)
1372 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
1373 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1374 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1376 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1377 machine_mode op1_mode
;
1379 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1380 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1381 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1383 /* Apply the truncation to constant shifts. */
1384 if (double_shift_mask
> 0 && CONST_INT_P (op1
))
1385 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1387 if (op1
== CONST0_RTX (op1_mode
))
1390 /* Make sure that this is a combination that expand_doubleword_shift
1391 can handle. See the comments there for details. */
1392 if (double_shift_mask
== 0
1393 || (shift_mask
== BITS_PER_WORD
- 1
1394 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1397 rtx into_target
, outof_target
;
1398 rtx into_input
, outof_input
;
1399 int left_shift
, outof_word
;
1401 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1402 won't be accurate, so use a new target. */
1406 || !valid_multiword_target_p (target
))
1407 target
= gen_reg_rtx (mode
);
1411 /* OUTOF_* is the word we are shifting bits away from, and
1412 INTO_* is the word that we are shifting bits towards, thus
1413 they differ depending on the direction of the shift and
1414 WORDS_BIG_ENDIAN. */
1416 left_shift
= binoptab
== ashl_optab
;
1417 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1419 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1420 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1422 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1423 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1425 if (expand_doubleword_shift (op1_mode
, binoptab
,
1426 outof_input
, into_input
, op1
,
1427 outof_target
, into_target
,
1428 unsignedp
, next_methods
, shift_mask
))
1430 insns
= get_insns ();
1440 /* Synthesize double word rotates from single word shifts. */
1441 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1442 && mclass
== MODE_INT
1443 && CONST_INT_P (op1
)
1444 && GET_MODE_PRECISION (mode
) == 2 * BITS_PER_WORD
1445 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1446 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1449 rtx into_target
, outof_target
;
1450 rtx into_input
, outof_input
;
1452 int shift_count
, left_shift
, outof_word
;
1454 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1455 won't be accurate, so use a new target. Do this also if target is not
1456 a REG, first because having a register instead may open optimization
1457 opportunities, and second because if target and op0 happen to be MEMs
1458 designating the same location, we would risk clobbering it too early
1459 in the code sequence we generate below. */
1464 || !valid_multiword_target_p (target
))
1465 target
= gen_reg_rtx (mode
);
1469 shift_count
= INTVAL (op1
);
1471 /* OUTOF_* is the word we are shifting bits away from, and
1472 INTO_* is the word that we are shifting bits towards, thus
1473 they differ depending on the direction of the shift and
1474 WORDS_BIG_ENDIAN. */
1476 left_shift
= (binoptab
== rotl_optab
);
1477 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1479 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1480 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1482 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1483 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1485 if (shift_count
== BITS_PER_WORD
)
1487 /* This is just a word swap. */
1488 emit_move_insn (outof_target
, into_input
);
1489 emit_move_insn (into_target
, outof_input
);
1494 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1495 rtx first_shift_count
, second_shift_count
;
1496 optab reverse_unsigned_shift
, unsigned_shift
;
1498 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1499 ? lshr_optab
: ashl_optab
);
1501 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1502 ? ashl_optab
: lshr_optab
);
1504 if (shift_count
> BITS_PER_WORD
)
1506 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1507 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1511 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1512 second_shift_count
= GEN_INT (shift_count
);
1515 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1516 outof_input
, first_shift_count
,
1517 NULL_RTX
, unsignedp
, next_methods
);
1518 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1519 into_input
, second_shift_count
,
1520 NULL_RTX
, unsignedp
, next_methods
);
1522 if (into_temp1
!= 0 && into_temp2
!= 0)
1523 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1524 into_target
, unsignedp
, next_methods
);
1528 if (inter
!= 0 && inter
!= into_target
)
1529 emit_move_insn (into_target
, inter
);
1531 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1532 into_input
, first_shift_count
,
1533 NULL_RTX
, unsignedp
, next_methods
);
1534 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1535 outof_input
, second_shift_count
,
1536 NULL_RTX
, unsignedp
, next_methods
);
1538 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1539 inter
= expand_binop (word_mode
, ior_optab
,
1540 outof_temp1
, outof_temp2
,
1541 outof_target
, unsignedp
, next_methods
);
1543 if (inter
!= 0 && inter
!= outof_target
)
1544 emit_move_insn (outof_target
, inter
);
1547 insns
= get_insns ();
1557 /* These can be done a word at a time by propagating carries. */
1558 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1559 && mclass
== MODE_INT
1560 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1561 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1564 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1565 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1566 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1567 rtx xop0
, xop1
, xtarget
;
1569 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1570 value is one of those, use it. Otherwise, use 1 since it is the
1571 one easiest to get. */
1572 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1573 int normalizep
= STORE_FLAG_VALUE
;
1578 /* Prepare the operands. */
1579 xop0
= force_reg (mode
, op0
);
1580 xop1
= force_reg (mode
, op1
);
1582 xtarget
= gen_reg_rtx (mode
);
1584 if (target
== 0 || !REG_P (target
) || !valid_multiword_target_p (target
))
1587 /* Indicate for flow that the entire target reg is being set. */
1589 emit_clobber (xtarget
);
1591 /* Do the actual arithmetic. */
1592 for (i
= 0; i
< nwords
; i
++)
1594 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1595 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1596 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1597 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1600 /* Main add/subtract of the input operands. */
1601 x
= expand_binop (word_mode
, binoptab
,
1602 op0_piece
, op1_piece
,
1603 target_piece
, unsignedp
, next_methods
);
1609 /* Store carry from main add/subtract. */
1610 carry_out
= gen_reg_rtx (word_mode
);
1611 carry_out
= emit_store_flag_force (carry_out
,
1612 (binoptab
== add_optab
1615 word_mode
, 1, normalizep
);
1622 /* Add/subtract previous carry to main result. */
1623 newx
= expand_binop (word_mode
,
1624 normalizep
== 1 ? binoptab
: otheroptab
,
1626 NULL_RTX
, 1, next_methods
);
1630 /* Get out carry from adding/subtracting carry in. */
1631 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1632 carry_tmp
= emit_store_flag_force (carry_tmp
,
1633 (binoptab
== add_optab
1636 word_mode
, 1, normalizep
);
1638 /* Logical-ior the two poss. carry together. */
1639 carry_out
= expand_binop (word_mode
, ior_optab
,
1640 carry_out
, carry_tmp
,
1641 carry_out
, 0, next_methods
);
1645 emit_move_insn (target_piece
, newx
);
1649 if (x
!= target_piece
)
1650 emit_move_insn (target_piece
, x
);
1653 carry_in
= carry_out
;
1656 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1658 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
1659 || ! rtx_equal_p (target
, xtarget
))
1661 rtx_insn
*temp
= emit_move_insn (target
, xtarget
);
1663 set_dst_reg_note (temp
, REG_EQUAL
,
1664 gen_rtx_fmt_ee (optab_to_code (binoptab
),
1665 mode
, copy_rtx (xop0
),
1676 delete_insns_since (last
);
1679 /* Attempt to synthesize double word multiplies using a sequence of word
1680 mode multiplications. We first attempt to generate a sequence using a
1681 more efficient unsigned widening multiply, and if that fails we then
1682 try using a signed widening multiply. */
1684 if (binoptab
== smul_optab
1685 && mclass
== MODE_INT
1686 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1687 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
1688 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
1690 rtx product
= NULL_RTX
;
1691 if (widening_optab_handler (umul_widen_optab
, mode
, word_mode
)
1692 != CODE_FOR_nothing
)
1694 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1697 delete_insns_since (last
);
1700 if (product
== NULL_RTX
1701 && widening_optab_handler (smul_widen_optab
, mode
, word_mode
)
1702 != CODE_FOR_nothing
)
1704 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1707 delete_insns_since (last
);
1710 if (product
!= NULL_RTX
)
1712 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
)
1714 temp
= emit_move_insn (target
? target
: product
, product
);
1715 set_dst_reg_note (temp
,
1717 gen_rtx_fmt_ee (MULT
, mode
,
1720 target
? target
: product
);
1726 /* It can't be open-coded in this mode.
1727 Use a library call if one is available and caller says that's ok. */
1729 libfunc
= optab_libfunc (binoptab
, mode
);
1731 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1735 machine_mode op1_mode
= mode
;
1740 if (shift_optab_p (binoptab
))
1742 op1_mode
= targetm
.libgcc_shift_count_mode ();
1743 /* Specify unsigned here,
1744 since negative shift counts are meaningless. */
1745 op1x
= convert_to_mode (op1_mode
, op1
, 1);
1748 if (GET_MODE (op0
) != VOIDmode
1749 && GET_MODE (op0
) != mode
)
1750 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1752 /* Pass 1 for NO_QUEUE so we don't lose any increments
1753 if the libcall is cse'd or moved. */
1754 value
= emit_library_call_value (libfunc
,
1755 NULL_RTX
, LCT_CONST
, mode
, 2,
1756 op0
, mode
, op1x
, op1_mode
);
1758 insns
= get_insns ();
1761 bool trapv
= trapv_binoptab_p (binoptab
);
1762 target
= gen_reg_rtx (mode
);
1763 emit_libcall_block_1 (insns
, target
, value
,
1765 : gen_rtx_fmt_ee (optab_to_code (binoptab
),
1766 mode
, op0
, op1
), trapv
);
1771 delete_insns_since (last
);
1773 /* It can't be done in this mode. Can we do it in a wider mode? */
1775 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1776 || methods
== OPTAB_MUST_WIDEN
))
1778 /* Caller says, don't even try. */
1779 delete_insns_since (entry_last
);
1783 /* Compute the value of METHODS to pass to recursive calls.
1784 Don't allow widening to be tried recursively. */
1786 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1788 /* Look for a wider mode of the same class for which it appears we can do
1791 if (CLASS_HAS_WIDER_MODES_P (mclass
))
1793 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1794 wider_mode
!= VOIDmode
;
1795 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1797 if (find_widening_optab_handler (binoptab
, wider_mode
, mode
, 1)
1799 || (methods
== OPTAB_LIB
1800 && optab_libfunc (binoptab
, wider_mode
)))
1802 rtx xop0
= op0
, xop1
= op1
;
1805 /* For certain integer operations, we need not actually extend
1806 the narrow operands, as long as we will truncate
1807 the results to the same narrowness. */
1809 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1810 || binoptab
== xor_optab
1811 || binoptab
== add_optab
|| binoptab
== sub_optab
1812 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1813 && mclass
== MODE_INT
)
1816 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1817 unsignedp
, no_extend
);
1819 /* The second operand of a shift must always be extended. */
1820 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1821 no_extend
&& binoptab
!= ashl_optab
);
1823 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1824 unsignedp
, methods
);
1827 if (mclass
!= MODE_INT
1828 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1831 target
= gen_reg_rtx (mode
);
1832 convert_move (target
, temp
, 0);
1836 return gen_lowpart (mode
, temp
);
1839 delete_insns_since (last
);
1844 delete_insns_since (entry_last
);
1848 /* Expand a binary operator which has both signed and unsigned forms.
1849 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1852 If we widen unsigned operands, we may use a signed wider operation instead
1853 of an unsigned wider operation, since the result would be the same. */
1856 sign_expand_binop (machine_mode mode
, optab uoptab
, optab soptab
,
1857 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1858 enum optab_methods methods
)
1861 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1864 /* Do it without widening, if possible. */
1865 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1866 unsignedp
, OPTAB_DIRECT
);
1867 if (temp
|| methods
== OPTAB_DIRECT
)
1870 /* Try widening to a signed int. Disable any direct use of any
1871 signed insn in the current mode. */
1872 save_enable
= swap_optab_enable (soptab
, mode
, false);
1874 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
1875 unsignedp
, OPTAB_WIDEN
);
1877 /* For unsigned operands, try widening to an unsigned int. */
1878 if (!temp
&& unsignedp
)
1879 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1880 unsignedp
, OPTAB_WIDEN
);
1881 if (temp
|| methods
== OPTAB_WIDEN
)
1884 /* Use the right width libcall if that exists. */
1885 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1886 unsignedp
, OPTAB_LIB
);
1887 if (temp
|| methods
== OPTAB_LIB
)
1890 /* Must widen and use a libcall, use either signed or unsigned. */
1891 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
1892 unsignedp
, methods
);
1893 if (!temp
&& unsignedp
)
1894 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1895 unsignedp
, methods
);
1898 /* Undo the fiddling above. */
1900 swap_optab_enable (soptab
, mode
, true);
1904 /* Generate code to perform an operation specified by UNOPPTAB
1905 on operand OP0, with two results to TARG0 and TARG1.
1906 We assume that the order of the operands for the instruction
1907 is TARG0, TARG1, OP0.
1909 Either TARG0 or TARG1 may be zero, but what that means is that
1910 the result is not actually wanted. We will generate it into
1911 a dummy pseudo-reg and discard it. They may not both be zero.
1913 Returns 1 if this operation can be performed; 0 if not. */
1916 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1919 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1920 enum mode_class mclass
;
1921 machine_mode wider_mode
;
1922 rtx_insn
*entry_last
= get_last_insn ();
1925 mclass
= GET_MODE_CLASS (mode
);
1928 targ0
= gen_reg_rtx (mode
);
1930 targ1
= gen_reg_rtx (mode
);
1932 /* Record where to go back to if we fail. */
1933 last
= get_last_insn ();
1935 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
1937 struct expand_operand ops
[3];
1938 enum insn_code icode
= optab_handler (unoptab
, mode
);
1940 create_fixed_operand (&ops
[0], targ0
);
1941 create_fixed_operand (&ops
[1], targ1
);
1942 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
1943 if (maybe_expand_insn (icode
, 3, ops
))
1947 /* It can't be done in this mode. Can we do it in a wider mode? */
1949 if (CLASS_HAS_WIDER_MODES_P (mclass
))
1951 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
1952 wider_mode
!= VOIDmode
;
1953 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1955 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
1957 rtx t0
= gen_reg_rtx (wider_mode
);
1958 rtx t1
= gen_reg_rtx (wider_mode
);
1959 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1961 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1963 convert_move (targ0
, t0
, unsignedp
);
1964 convert_move (targ1
, t1
, unsignedp
);
1968 delete_insns_since (last
);
1973 delete_insns_since (entry_last
);
1977 /* Generate code to perform an operation specified by BINOPTAB
1978 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1979 We assume that the order of the operands for the instruction
1980 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1981 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1983 Either TARG0 or TARG1 may be zero, but what that means is that
1984 the result is not actually wanted. We will generate it into
1985 a dummy pseudo-reg and discard it. They may not both be zero.
1987 Returns 1 if this operation can be performed; 0 if not. */
1990 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
1993 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1994 enum mode_class mclass
;
1995 machine_mode wider_mode
;
1996 rtx_insn
*entry_last
= get_last_insn ();
1999 mclass
= GET_MODE_CLASS (mode
);
2002 targ0
= gen_reg_rtx (mode
);
2004 targ1
= gen_reg_rtx (mode
);
2006 /* Record where to go back to if we fail. */
2007 last
= get_last_insn ();
2009 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2011 struct expand_operand ops
[4];
2012 enum insn_code icode
= optab_handler (binoptab
, mode
);
2013 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2014 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2015 rtx xop0
= op0
, xop1
= op1
;
2017 /* If we are optimizing, force expensive constants into a register. */
2018 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2019 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2021 create_fixed_operand (&ops
[0], targ0
);
2022 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2023 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
2024 create_fixed_operand (&ops
[3], targ1
);
2025 if (maybe_expand_insn (icode
, 4, ops
))
2027 delete_insns_since (last
);
2030 /* It can't be done in this mode. Can we do it in a wider mode? */
2032 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2034 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2035 wider_mode
!= VOIDmode
;
2036 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2038 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2040 rtx t0
= gen_reg_rtx (wider_mode
);
2041 rtx t1
= gen_reg_rtx (wider_mode
);
2042 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2043 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2045 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2048 convert_move (targ0
, t0
, unsignedp
);
2049 convert_move (targ1
, t1
, unsignedp
);
2053 delete_insns_since (last
);
2058 delete_insns_since (entry_last
);
2062 /* Expand the two-valued library call indicated by BINOPTAB, but
2063 preserve only one of the values. If TARG0 is non-NULL, the first
2064 value is placed into TARG0; otherwise the second value is placed
2065 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2066 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2067 This routine assumes that the value returned by the library call is
2068 as if the return value was of an integral mode twice as wide as the
2069 mode of OP0. Returns 1 if the call was successful. */
2072 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2073 rtx targ0
, rtx targ1
, enum rtx_code code
)
2076 machine_mode libval_mode
;
2081 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2082 gcc_assert (!targ0
!= !targ1
);
2084 mode
= GET_MODE (op0
);
2085 libfunc
= optab_libfunc (binoptab
, mode
);
2089 /* The value returned by the library function will have twice as
2090 many bits as the nominal MODE. */
2091 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2094 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2098 /* Get the part of VAL containing the value that we want. */
2099 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2100 targ0
? 0 : GET_MODE_SIZE (mode
));
2101 insns
= get_insns ();
2103 /* Move the into the desired location. */
2104 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2105 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2111 /* Wrapper around expand_unop which takes an rtx code to specify
2112 the operation to perform, not an optab pointer. All other
2113 arguments are the same. */
2115 expand_simple_unop (machine_mode mode
, enum rtx_code code
, rtx op0
,
2116 rtx target
, int unsignedp
)
2118 optab unop
= code_to_optab (code
);
2121 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2127 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2129 A similar operation can be used for clrsb. UNOPTAB says which operation
2130 we are trying to expand. */
2132 widen_leading (machine_mode mode
, rtx op0
, rtx target
, optab unoptab
)
2134 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2135 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2137 machine_mode wider_mode
;
2138 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2139 wider_mode
!= VOIDmode
;
2140 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2142 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2147 last
= get_last_insn ();
2150 target
= gen_reg_rtx (mode
);
2151 xop0
= widen_operand (op0
, wider_mode
, mode
,
2152 unoptab
!= clrsb_optab
, false);
2153 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2154 unoptab
!= clrsb_optab
);
2157 (wider_mode
, sub_optab
, temp
,
2158 gen_int_mode (GET_MODE_PRECISION (wider_mode
)
2159 - GET_MODE_PRECISION (mode
),
2161 target
, true, OPTAB_DIRECT
);
2163 delete_insns_since (last
);
2172 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2173 quantities, choosing which based on whether the high word is nonzero. */
2175 expand_doubleword_clz (machine_mode mode
, rtx op0
, rtx target
)
2177 rtx xop0
= force_reg (mode
, op0
);
2178 rtx subhi
= gen_highpart (word_mode
, xop0
);
2179 rtx sublo
= gen_lowpart (word_mode
, xop0
);
2180 rtx_code_label
*hi0_label
= gen_label_rtx ();
2181 rtx_code_label
*after_label
= gen_label_rtx ();
2185 /* If we were not given a target, use a word_mode register, not a
2186 'mode' register. The result will fit, and nobody is expecting
2187 anything bigger (the return type of __builtin_clz* is int). */
2189 target
= gen_reg_rtx (word_mode
);
2191 /* In any case, write to a word_mode scratch in both branches of the
2192 conditional, so we can ensure there is a single move insn setting
2193 'target' to tag a REG_EQUAL note on. */
2194 result
= gen_reg_rtx (word_mode
);
2198 /* If the high word is not equal to zero,
2199 then clz of the full value is clz of the high word. */
2200 emit_cmp_and_jump_insns (subhi
, CONST0_RTX (word_mode
), EQ
, 0,
2201 word_mode
, true, hi0_label
);
2203 temp
= expand_unop_direct (word_mode
, clz_optab
, subhi
, result
, true);
2208 convert_move (result
, temp
, true);
2210 emit_jump_insn (targetm
.gen_jump (after_label
));
2213 /* Else clz of the full value is clz of the low word plus the number
2214 of bits in the high word. */
2215 emit_label (hi0_label
);
2217 temp
= expand_unop_direct (word_mode
, clz_optab
, sublo
, 0, true);
2220 temp
= expand_binop (word_mode
, add_optab
, temp
,
2221 gen_int_mode (GET_MODE_BITSIZE (word_mode
), word_mode
),
2222 result
, true, OPTAB_DIRECT
);
2226 convert_move (result
, temp
, true);
2228 emit_label (after_label
);
2229 convert_move (target
, result
, true);
2234 add_equal_note (seq
, target
, CLZ
, xop0
, 0);
2243 /* Try calculating popcount of a double-word quantity as two popcount's of
2244 word-sized quantities and summing up the results. */
2246 expand_doubleword_popcount (machine_mode mode
, rtx op0
, rtx target
)
2253 t0
= expand_unop_direct (word_mode
, popcount_optab
,
2254 operand_subword_force (op0
, 0, mode
), NULL_RTX
,
2256 t1
= expand_unop_direct (word_mode
, popcount_optab
,
2257 operand_subword_force (op0
, 1, mode
), NULL_RTX
,
2265 /* If we were not given a target, use a word_mode register, not a
2266 'mode' register. The result will fit, and nobody is expecting
2267 anything bigger (the return type of __builtin_popcount* is int). */
2269 target
= gen_reg_rtx (word_mode
);
2271 t
= expand_binop (word_mode
, add_optab
, t0
, t1
, target
, 0, OPTAB_DIRECT
);
2276 add_equal_note (seq
, t
, POPCOUNT
, op0
, 0);
2284 (parity:narrow (low (x) ^ high (x))) */
2286 expand_doubleword_parity (machine_mode mode
, rtx op0
, rtx target
)
2288 rtx t
= expand_binop (word_mode
, xor_optab
,
2289 operand_subword_force (op0
, 0, mode
),
2290 operand_subword_force (op0
, 1, mode
),
2291 NULL_RTX
, 0, OPTAB_DIRECT
);
2292 return expand_unop (word_mode
, parity_optab
, t
, target
, true);
2298 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2300 widen_bswap (machine_mode mode
, rtx op0
, rtx target
)
2302 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2303 machine_mode wider_mode
;
2307 if (!CLASS_HAS_WIDER_MODES_P (mclass
))
2310 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2311 wider_mode
!= VOIDmode
;
2312 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2313 if (optab_handler (bswap_optab
, wider_mode
) != CODE_FOR_nothing
)
2318 last
= get_last_insn ();
2320 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2321 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2323 gcc_assert (GET_MODE_PRECISION (wider_mode
) == GET_MODE_BITSIZE (wider_mode
)
2324 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
));
2326 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2327 GET_MODE_BITSIZE (wider_mode
)
2328 - GET_MODE_BITSIZE (mode
),
2334 target
= gen_reg_rtx (mode
);
2335 emit_move_insn (target
, gen_lowpart (mode
, x
));
2338 delete_insns_since (last
);
2343 /* Try calculating bswap as two bswaps of two word-sized operands. */
2346 expand_doubleword_bswap (machine_mode mode
, rtx op
, rtx target
)
2350 t1
= expand_unop (word_mode
, bswap_optab
,
2351 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2352 t0
= expand_unop (word_mode
, bswap_optab
,
2353 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2355 if (target
== 0 || !valid_multiword_target_p (target
))
2356 target
= gen_reg_rtx (mode
);
2358 emit_clobber (target
);
2359 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2360 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
2365 /* Try calculating (parity x) as (and (popcount x) 1), where
2366 popcount can also be done in a wider mode. */
2368 expand_parity (machine_mode mode
, rtx op0
, rtx target
)
2370 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2371 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2373 machine_mode wider_mode
;
2374 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2375 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2377 if (optab_handler (popcount_optab
, wider_mode
) != CODE_FOR_nothing
)
2382 last
= get_last_insn ();
2385 target
= gen_reg_rtx (mode
);
2386 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2387 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2390 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2391 target
, true, OPTAB_DIRECT
);
2393 delete_insns_since (last
);
2402 /* Try calculating ctz(x) as K - clz(x & -x) ,
2403 where K is GET_MODE_PRECISION(mode) - 1.
2405 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2406 don't have to worry about what the hardware does in that case. (If
2407 the clz instruction produces the usual value at 0, which is K, the
2408 result of this code sequence will be -1; expand_ffs, below, relies
2409 on this. It might be nice to have it be K instead, for consistency
2410 with the (very few) processors that provide a ctz with a defined
2411 value, but that would take one more instruction, and it would be
2412 less convenient for expand_ffs anyway. */
2415 expand_ctz (machine_mode mode
, rtx op0
, rtx target
)
2420 if (optab_handler (clz_optab
, mode
) == CODE_FOR_nothing
)
2425 temp
= expand_unop_direct (mode
, neg_optab
, op0
, NULL_RTX
, true);
2427 temp
= expand_binop (mode
, and_optab
, op0
, temp
, NULL_RTX
,
2428 true, OPTAB_DIRECT
);
2430 temp
= expand_unop_direct (mode
, clz_optab
, temp
, NULL_RTX
, true);
2432 temp
= expand_binop (mode
, sub_optab
,
2433 gen_int_mode (GET_MODE_PRECISION (mode
) - 1, mode
),
2435 true, OPTAB_DIRECT
);
2445 add_equal_note (seq
, temp
, CTZ
, op0
, 0);
2451 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2452 else with the sequence used by expand_clz.
2454 The ffs builtin promises to return zero for a zero value and ctz/clz
2455 may have an undefined value in that case. If they do not give us a
2456 convenient value, we have to generate a test and branch. */
2458 expand_ffs (machine_mode mode
, rtx op0
, rtx target
)
2460 HOST_WIDE_INT val
= 0;
2461 bool defined_at_zero
= false;
2465 if (optab_handler (ctz_optab
, mode
) != CODE_FOR_nothing
)
2469 temp
= expand_unop_direct (mode
, ctz_optab
, op0
, 0, true);
2473 defined_at_zero
= (CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2);
2475 else if (optab_handler (clz_optab
, mode
) != CODE_FOR_nothing
)
2478 temp
= expand_ctz (mode
, op0
, 0);
2482 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2)
2484 defined_at_zero
= true;
2485 val
= (GET_MODE_PRECISION (mode
) - 1) - val
;
2491 if (defined_at_zero
&& val
== -1)
2492 /* No correction needed at zero. */;
2495 /* We don't try to do anything clever with the situation found
2496 on some processors (eg Alpha) where ctz(0:mode) ==
2497 bitsize(mode). If someone can think of a way to send N to -1
2498 and leave alone all values in the range 0..N-1 (where N is a
2499 power of two), cheaper than this test-and-branch, please add it.
2501 The test-and-branch is done after the operation itself, in case
2502 the operation sets condition codes that can be recycled for this.
2503 (This is true on i386, for instance.) */
2505 rtx_code_label
*nonzero_label
= gen_label_rtx ();
2506 emit_cmp_and_jump_insns (op0
, CONST0_RTX (mode
), NE
, 0,
2507 mode
, true, nonzero_label
);
2509 convert_move (temp
, GEN_INT (-1), false);
2510 emit_label (nonzero_label
);
2513 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2514 to produce a value in the range 0..bitsize. */
2515 temp
= expand_binop (mode
, add_optab
, temp
, gen_int_mode (1, mode
),
2516 target
, false, OPTAB_DIRECT
);
2523 add_equal_note (seq
, temp
, FFS
, op0
, 0);
2532 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2533 conditions, VAL may already be a SUBREG against which we cannot generate
2534 a further SUBREG. In this case, we expect forcing the value into a
2535 register will work around the situation. */
2538 lowpart_subreg_maybe_copy (machine_mode omode
, rtx val
,
2542 ret
= lowpart_subreg (omode
, val
, imode
);
2545 val
= force_reg (imode
, val
);
2546 ret
= lowpart_subreg (omode
, val
, imode
);
2547 gcc_assert (ret
!= NULL
);
2552 /* Expand a floating point absolute value or negation operation via a
2553 logical operation on the sign bit. */
2556 expand_absneg_bit (enum rtx_code code
, machine_mode mode
,
2557 rtx op0
, rtx target
)
2559 const struct real_format
*fmt
;
2560 int bitpos
, word
, nwords
, i
;
2565 /* The format has to have a simple sign bit. */
2566 fmt
= REAL_MODE_FORMAT (mode
);
2570 bitpos
= fmt
->signbit_rw
;
2574 /* Don't create negative zeros if the format doesn't support them. */
2575 if (code
== NEG
&& !fmt
->has_signed_zero
)
2578 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2580 imode
= int_mode_for_mode (mode
);
2581 if (imode
== BLKmode
)
2590 if (FLOAT_WORDS_BIG_ENDIAN
)
2591 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2593 word
= bitpos
/ BITS_PER_WORD
;
2594 bitpos
= bitpos
% BITS_PER_WORD
;
2595 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2598 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
2604 || (nwords
> 1 && !valid_multiword_target_p (target
)))
2605 target
= gen_reg_rtx (mode
);
2611 for (i
= 0; i
< nwords
; ++i
)
2613 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2614 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2618 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2620 immed_wide_int_const (mask
, imode
),
2621 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2622 if (temp
!= targ_piece
)
2623 emit_move_insn (targ_piece
, temp
);
2626 emit_move_insn (targ_piece
, op0_piece
);
2629 insns
= get_insns ();
2636 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2637 gen_lowpart (imode
, op0
),
2638 immed_wide_int_const (mask
, imode
),
2639 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2640 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2642 set_dst_reg_note (get_last_insn (), REG_EQUAL
,
2643 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)),
2650 /* As expand_unop, but will fail rather than attempt the operation in a
2651 different mode or with a libcall. */
2653 expand_unop_direct (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2656 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2658 struct expand_operand ops
[2];
2659 enum insn_code icode
= optab_handler (unoptab
, mode
);
2660 rtx_insn
*last
= get_last_insn ();
2663 create_output_operand (&ops
[0], target
, mode
);
2664 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
2665 pat
= maybe_gen_insn (icode
, 2, ops
);
2668 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2669 && ! add_equal_note (pat
, ops
[0].value
,
2670 optab_to_code (unoptab
),
2671 ops
[1].value
, NULL_RTX
))
2673 delete_insns_since (last
);
2674 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2679 return ops
[0].value
;
2685 /* Generate code to perform an operation specified by UNOPTAB
2686 on operand OP0, with result having machine-mode MODE.
2688 UNSIGNEDP is for the case where we have to widen the operands
2689 to perform the operation. It says to use zero-extension.
2691 If TARGET is nonzero, the value
2692 is generated there, if it is convenient to do so.
2693 In all cases an rtx is returned for the locus of the value;
2694 this may or may not be TARGET. */
2697 expand_unop (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2700 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2701 machine_mode wider_mode
;
2705 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
2709 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2711 /* Widening (or narrowing) clz needs special treatment. */
2712 if (unoptab
== clz_optab
)
2714 temp
= widen_leading (mode
, op0
, target
, unoptab
);
2718 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2719 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
2721 temp
= expand_doubleword_clz (mode
, op0
, target
);
2729 if (unoptab
== clrsb_optab
)
2731 temp
= widen_leading (mode
, op0
, target
, unoptab
);
2737 if (unoptab
== popcount_optab
2738 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2739 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
2740 && optimize_insn_for_speed_p ())
2742 temp
= expand_doubleword_popcount (mode
, op0
, target
);
2747 if (unoptab
== parity_optab
2748 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2749 && (optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
2750 || optab_handler (popcount_optab
, word_mode
) != CODE_FOR_nothing
)
2751 && optimize_insn_for_speed_p ())
2753 temp
= expand_doubleword_parity (mode
, op0
, target
);
2758 /* Widening (or narrowing) bswap needs special treatment. */
2759 if (unoptab
== bswap_optab
)
2761 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2762 or ROTATERT. First try these directly; if this fails, then try the
2763 obvious pair of shifts with allowed widening, as this will probably
2764 be always more efficient than the other fallback methods. */
2770 if (optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
)
2772 temp
= expand_binop (mode
, rotl_optab
, op0
, GEN_INT (8), target
,
2773 unsignedp
, OPTAB_DIRECT
);
2778 if (optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
2780 temp
= expand_binop (mode
, rotr_optab
, op0
, GEN_INT (8), target
,
2781 unsignedp
, OPTAB_DIRECT
);
2786 last
= get_last_insn ();
2788 temp1
= expand_binop (mode
, ashl_optab
, op0
, GEN_INT (8), NULL_RTX
,
2789 unsignedp
, OPTAB_WIDEN
);
2790 temp2
= expand_binop (mode
, lshr_optab
, op0
, GEN_INT (8), NULL_RTX
,
2791 unsignedp
, OPTAB_WIDEN
);
2794 temp
= expand_binop (mode
, ior_optab
, temp1
, temp2
, target
,
2795 unsignedp
, OPTAB_WIDEN
);
2800 delete_insns_since (last
);
2803 temp
= widen_bswap (mode
, op0
, target
);
2807 if (GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
2808 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
2810 temp
= expand_doubleword_bswap (mode
, op0
, target
);
2818 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2819 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2820 wider_mode
!= VOIDmode
;
2821 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2823 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2826 rtx_insn
*last
= get_last_insn ();
2828 /* For certain operations, we need not actually extend
2829 the narrow operand, as long as we will truncate the
2830 results to the same narrowness. */
2832 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2833 (unoptab
== neg_optab
2834 || unoptab
== one_cmpl_optab
)
2835 && mclass
== MODE_INT
);
2837 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2842 if (mclass
!= MODE_INT
2843 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2846 target
= gen_reg_rtx (mode
);
2847 convert_move (target
, temp
, 0);
2851 return gen_lowpart (mode
, temp
);
2854 delete_insns_since (last
);
2858 /* These can be done a word at a time. */
2859 if (unoptab
== one_cmpl_optab
2860 && mclass
== MODE_INT
2861 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2862 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
2867 if (target
== 0 || target
== op0
|| !valid_multiword_target_p (target
))
2868 target
= gen_reg_rtx (mode
);
2872 /* Do the actual arithmetic. */
2873 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2875 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2876 rtx x
= expand_unop (word_mode
, unoptab
,
2877 operand_subword_force (op0
, i
, mode
),
2878 target_piece
, unsignedp
);
2880 if (target_piece
!= x
)
2881 emit_move_insn (target_piece
, x
);
2884 insns
= get_insns ();
2891 if (optab_to_code (unoptab
) == NEG
)
2893 /* Try negating floating point values by flipping the sign bit. */
2894 if (SCALAR_FLOAT_MODE_P (mode
))
2896 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2901 /* If there is no negation pattern, and we have no negative zero,
2902 try subtracting from zero. */
2903 if (!HONOR_SIGNED_ZEROS (mode
))
2905 temp
= expand_binop (mode
, (unoptab
== negv_optab
2906 ? subv_optab
: sub_optab
),
2907 CONST0_RTX (mode
), op0
, target
,
2908 unsignedp
, OPTAB_DIRECT
);
2914 /* Try calculating parity (x) as popcount (x) % 2. */
2915 if (unoptab
== parity_optab
)
2917 temp
= expand_parity (mode
, op0
, target
);
2922 /* Try implementing ffs (x) in terms of clz (x). */
2923 if (unoptab
== ffs_optab
)
2925 temp
= expand_ffs (mode
, op0
, target
);
2930 /* Try implementing ctz (x) in terms of clz (x). */
2931 if (unoptab
== ctz_optab
)
2933 temp
= expand_ctz (mode
, op0
, target
);
2939 /* Now try a library call in this mode. */
2940 libfunc
= optab_libfunc (unoptab
, mode
);
2946 machine_mode outmode
= mode
;
2948 /* All of these functions return small values. Thus we choose to
2949 have them return something that isn't a double-word. */
2950 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2951 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
2952 || unoptab
== parity_optab
)
2954 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
2955 optab_libfunc (unoptab
, mode
)));
2959 /* Pass 1 for NO_QUEUE so we don't lose any increments
2960 if the libcall is cse'd or moved. */
2961 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
2963 insns
= get_insns ();
2966 target
= gen_reg_rtx (outmode
);
2967 bool trapv
= trapv_unoptab_p (unoptab
);
2969 eq_value
= NULL_RTX
;
2972 eq_value
= gen_rtx_fmt_e (optab_to_code (unoptab
), mode
, op0
);
2973 if (GET_MODE_SIZE (outmode
) < GET_MODE_SIZE (mode
))
2974 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
2975 else if (GET_MODE_SIZE (outmode
) > GET_MODE_SIZE (mode
))
2976 eq_value
= simplify_gen_unary (ZERO_EXTEND
,
2977 outmode
, eq_value
, mode
);
2979 emit_libcall_block_1 (insns
, target
, value
, eq_value
, trapv
);
2984 /* It can't be done in this mode. Can we do it in a wider mode? */
2986 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2988 for (wider_mode
= GET_MODE_WIDER_MODE (mode
);
2989 wider_mode
!= VOIDmode
;
2990 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2992 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
2993 || optab_libfunc (unoptab
, wider_mode
))
2996 rtx_insn
*last
= get_last_insn ();
2998 /* For certain operations, we need not actually extend
2999 the narrow operand, as long as we will truncate the
3000 results to the same narrowness. */
3001 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3002 (unoptab
== neg_optab
3003 || unoptab
== one_cmpl_optab
3004 || unoptab
== bswap_optab
)
3005 && mclass
== MODE_INT
);
3007 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3010 /* If we are generating clz using wider mode, adjust the
3011 result. Similarly for clrsb. */
3012 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3015 (wider_mode
, sub_optab
, temp
,
3016 gen_int_mode (GET_MODE_PRECISION (wider_mode
)
3017 - GET_MODE_PRECISION (mode
),
3019 target
, true, OPTAB_DIRECT
);
3021 /* Likewise for bswap. */
3022 if (unoptab
== bswap_optab
&& temp
!= 0)
3024 gcc_assert (GET_MODE_PRECISION (wider_mode
)
3025 == GET_MODE_BITSIZE (wider_mode
)
3026 && GET_MODE_PRECISION (mode
)
3027 == GET_MODE_BITSIZE (mode
));
3029 temp
= expand_shift (RSHIFT_EXPR
, wider_mode
, temp
,
3030 GET_MODE_BITSIZE (wider_mode
)
3031 - GET_MODE_BITSIZE (mode
),
3037 if (mclass
!= MODE_INT
)
3040 target
= gen_reg_rtx (mode
);
3041 convert_move (target
, temp
, 0);
3045 return gen_lowpart (mode
, temp
);
3048 delete_insns_since (last
);
3053 /* One final attempt at implementing negation via subtraction,
3054 this time allowing widening of the operand. */
3055 if (optab_to_code (unoptab
) == NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3058 temp
= expand_binop (mode
,
3059 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3060 CONST0_RTX (mode
), op0
,
3061 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3069 /* Emit code to compute the absolute value of OP0, with result to
3070 TARGET if convenient. (TARGET may be 0.) The return value says
3071 where the result actually is to be found.
3073 MODE is the mode of the operand; the mode of the result is
3074 different but can be deduced from MODE.
3079 expand_abs_nojump (machine_mode mode
, rtx op0
, rtx target
,
3080 int result_unsignedp
)
3084 if (GET_MODE_CLASS (mode
) != MODE_INT
3086 result_unsignedp
= 1;
3088 /* First try to do it with a special abs instruction. */
3089 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3094 /* For floating point modes, try clearing the sign bit. */
3095 if (SCALAR_FLOAT_MODE_P (mode
))
3097 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
3102 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3103 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3104 && !HONOR_SIGNED_ZEROS (mode
))
3106 rtx_insn
*last
= get_last_insn ();
3108 temp
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3111 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3117 delete_insns_since (last
);
3120 /* If this machine has expensive jumps, we can do integer absolute
3121 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3122 where W is the width of MODE. */
3124 if (GET_MODE_CLASS (mode
) == MODE_INT
3125 && BRANCH_COST (optimize_insn_for_speed_p (),
3128 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3129 GET_MODE_PRECISION (mode
) - 1,
3132 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3135 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
3136 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3146 expand_abs (machine_mode mode
, rtx op0
, rtx target
,
3147 int result_unsignedp
, int safe
)
3150 rtx_code_label
*op1
;
3152 if (GET_MODE_CLASS (mode
) != MODE_INT
3154 result_unsignedp
= 1;
3156 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3160 /* If that does not win, use conditional jump and negate. */
3162 /* It is safe to use the target if it is the same
3163 as the source if this is also a pseudo register */
3164 if (op0
== target
&& REG_P (op0
)
3165 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3168 op1
= gen_label_rtx ();
3169 if (target
== 0 || ! safe
3170 || GET_MODE (target
) != mode
3171 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3173 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3174 target
= gen_reg_rtx (mode
);
3176 emit_move_insn (target
, op0
);
3179 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3180 NULL_RTX
, NULL
, op1
, -1);
3182 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3185 emit_move_insn (target
, op0
);
3191 /* Emit code to compute the one's complement absolute value of OP0
3192 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3193 (TARGET may be NULL_RTX.) The return value says where the result
3194 actually is to be found.
3196 MODE is the mode of the operand; the mode of the result is
3197 different but can be deduced from MODE. */
3200 expand_one_cmpl_abs_nojump (machine_mode mode
, rtx op0
, rtx target
)
3204 /* Not applicable for floating point modes. */
3205 if (FLOAT_MODE_P (mode
))
3208 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3209 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3211 rtx_insn
*last
= get_last_insn ();
3213 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3215 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3221 delete_insns_since (last
);
3224 /* If this machine has expensive jumps, we can do one's complement
3225 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3227 if (GET_MODE_CLASS (mode
) == MODE_INT
3228 && BRANCH_COST (optimize_insn_for_speed_p (),
3231 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
3232 GET_MODE_PRECISION (mode
) - 1,
3235 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
3245 /* A subroutine of expand_copysign, perform the copysign operation using the
3246 abs and neg primitives advertised to exist on the target. The assumption
3247 is that we have a split register file, and leaving op0 in fp registers,
3248 and not playing with subregs so much, will help the register allocator. */
3251 expand_copysign_absneg (machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3252 int bitpos
, bool op0_is_abs
)
3255 enum insn_code icode
;
3257 rtx_code_label
*label
;
3262 /* Check if the back end provides an insn that handles signbit for the
3264 icode
= optab_handler (signbit_optab
, mode
);
3265 if (icode
!= CODE_FOR_nothing
)
3267 imode
= insn_data
[(int) icode
].operand
[0].mode
;
3268 sign
= gen_reg_rtx (imode
);
3269 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3273 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3275 imode
= int_mode_for_mode (mode
);
3276 if (imode
== BLKmode
)
3278 op1
= gen_lowpart (imode
, op1
);
3285 if (FLOAT_WORDS_BIG_ENDIAN
)
3286 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3288 word
= bitpos
/ BITS_PER_WORD
;
3289 bitpos
= bitpos
% BITS_PER_WORD
;
3290 op1
= operand_subword_force (op1
, word
, mode
);
3293 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3294 sign
= expand_binop (imode
, and_optab
, op1
,
3295 immed_wide_int_const (mask
, imode
),
3296 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3301 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3308 if (target
== NULL_RTX
)
3309 target
= copy_to_reg (op0
);
3311 emit_move_insn (target
, op0
);
3314 label
= gen_label_rtx ();
3315 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3317 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3318 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3320 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3322 emit_move_insn (target
, op0
);
3330 /* A subroutine of expand_copysign, perform the entire copysign operation
3331 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3332 is true if op0 is known to have its sign bit clear. */
3335 expand_copysign_bit (machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
3336 int bitpos
, bool op0_is_abs
)
3339 int word
, nwords
, i
;
3343 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3345 imode
= int_mode_for_mode (mode
);
3346 if (imode
== BLKmode
)
3355 if (FLOAT_WORDS_BIG_ENDIAN
)
3356 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3358 word
= bitpos
/ BITS_PER_WORD
;
3359 bitpos
= bitpos
% BITS_PER_WORD
;
3360 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3363 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3368 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3369 target
= gen_reg_rtx (mode
);
3375 for (i
= 0; i
< nwords
; ++i
)
3377 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3378 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3384 = expand_binop (imode
, and_optab
, op0_piece
,
3385 immed_wide_int_const (~mask
, imode
),
3386 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3387 op1
= expand_binop (imode
, and_optab
,
3388 operand_subword_force (op1
, i
, mode
),
3389 immed_wide_int_const (mask
, imode
),
3390 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3392 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3393 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3394 if (temp
!= targ_piece
)
3395 emit_move_insn (targ_piece
, temp
);
3398 emit_move_insn (targ_piece
, op0_piece
);
3401 insns
= get_insns ();
3408 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3409 immed_wide_int_const (mask
, imode
),
3410 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3412 op0
= gen_lowpart (imode
, op0
);
3414 op0
= expand_binop (imode
, and_optab
, op0
,
3415 immed_wide_int_const (~mask
, imode
),
3416 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3418 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3419 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3420 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3426 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3427 scalar floating point mode. Return NULL if we do not know how to
3428 expand the operation inline. */
3431 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3433 machine_mode mode
= GET_MODE (op0
);
3434 const struct real_format
*fmt
;
3438 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
3439 gcc_assert (GET_MODE (op1
) == mode
);
3441 /* First try to do it with a special instruction. */
3442 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3443 target
, 0, OPTAB_DIRECT
);
3447 fmt
= REAL_MODE_FORMAT (mode
);
3448 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3452 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3454 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3455 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3459 if (fmt
->signbit_ro
>= 0
3460 && (CONST_DOUBLE_AS_FLOAT_P (op0
)
3461 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3462 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3464 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3465 fmt
->signbit_ro
, op0_is_abs
);
3470 if (fmt
->signbit_rw
< 0)
3472 return expand_copysign_bit (mode
, op0
, op1
, target
,
3473 fmt
->signbit_rw
, op0_is_abs
);
3476 /* Generate an instruction whose insn-code is INSN_CODE,
3477 with two operands: an output TARGET and an input OP0.
3478 TARGET *must* be nonzero, and the output is always stored there.
3479 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3480 the value that is stored into TARGET.
3482 Return false if expansion failed. */
3485 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3488 struct expand_operand ops
[2];
3491 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3492 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3493 pat
= maybe_gen_insn (icode
, 2, ops
);
3497 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3499 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
);
3503 if (ops
[0].value
!= target
)
3504 emit_move_insn (target
, ops
[0].value
);
3507 /* Generate an instruction whose insn-code is INSN_CODE,
3508 with two operands: an output TARGET and an input OP0.
3509 TARGET *must* be nonzero, and the output is always stored there.
3510 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3511 the value that is stored into TARGET. */
3514 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3516 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3520 struct no_conflict_data
3523 rtx_insn
*first
, *insn
;
3527 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3528 the currently examined clobber / store has to stay in the list of
3529 insns that constitute the actual libcall block. */
3531 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3533 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3535 /* If this inns directly contributes to setting the target, it must stay. */
3536 if (reg_overlap_mentioned_p (p
->target
, dest
))
3537 p
->must_stay
= true;
3538 /* If we haven't committed to keeping any other insns in the list yet,
3539 there is nothing more to check. */
3540 else if (p
->insn
== p
->first
)
3542 /* If this insn sets / clobbers a register that feeds one of the insns
3543 already in the list, this insn has to stay too. */
3544 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
3545 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
3546 || reg_used_between_p (dest
, p
->first
, p
->insn
)
3547 /* Likewise if this insn depends on a register set by a previous
3548 insn in the list, or if it sets a result (presumably a hard
3549 register) that is set or clobbered by a previous insn.
3550 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3551 SET_DEST perform the former check on the address, and the latter
3552 check on the MEM. */
3553 || (GET_CODE (set
) == SET
3554 && (modified_in_p (SET_SRC (set
), p
->first
)
3555 || modified_in_p (SET_DEST (set
), p
->first
)
3556 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
3557 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
3558 p
->must_stay
= true;
3562 /* Emit code to make a call to a constant function or a library call.
3564 INSNS is a list containing all insns emitted in the call.
3565 These insns leave the result in RESULT. Our block is to copy RESULT
3566 to TARGET, which is logically equivalent to EQUIV.
3568 We first emit any insns that set a pseudo on the assumption that these are
3569 loading constants into registers; doing so allows them to be safely cse'ed
3570 between blocks. Then we emit all the other insns in the block, followed by
3571 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3572 note with an operand of EQUIV. */
3575 emit_libcall_block_1 (rtx_insn
*insns
, rtx target
, rtx result
, rtx equiv
,
3576 bool equiv_may_trap
)
3578 rtx final_dest
= target
;
3579 rtx_insn
*next
, *last
, *insn
;
3581 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3582 into a MEM later. Protect the libcall block from this change. */
3583 if (! REG_P (target
) || REG_USERVAR_P (target
))
3584 target
= gen_reg_rtx (GET_MODE (target
));
3586 /* If we're using non-call exceptions, a libcall corresponding to an
3587 operation that may trap may also trap. */
3588 /* ??? See the comment in front of make_reg_eh_region_note. */
3589 if (cfun
->can_throw_non_call_exceptions
3590 && (equiv_may_trap
|| may_trap_p (equiv
)))
3592 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3595 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3598 int lp_nr
= INTVAL (XEXP (note
, 0));
3599 if (lp_nr
== 0 || lp_nr
== INT_MIN
)
3600 remove_note (insn
, note
);
3606 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3607 reg note to indicate that this call cannot throw or execute a nonlocal
3608 goto (unless there is already a REG_EH_REGION note, in which case
3610 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3612 make_reg_eh_region_note_nothrow_nononlocal (insn
);
3615 /* First emit all insns that set pseudos. Remove them from the list as
3616 we go. Avoid insns that set pseudos which were referenced in previous
3617 insns. These can be generated by move_by_pieces, for example,
3618 to update an address. Similarly, avoid insns that reference things
3619 set in previous insns. */
3621 for (insn
= insns
; insn
; insn
= next
)
3623 rtx set
= single_set (insn
);
3625 next
= NEXT_INSN (insn
);
3627 if (set
!= 0 && REG_P (SET_DEST (set
))
3628 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3630 struct no_conflict_data data
;
3632 data
.target
= const0_rtx
;
3636 note_stores (PATTERN (insn
), no_conflict_move_test
, &data
);
3637 if (! data
.must_stay
)
3639 if (PREV_INSN (insn
))
3640 SET_NEXT_INSN (PREV_INSN (insn
)) = next
;
3645 SET_PREV_INSN (next
) = PREV_INSN (insn
);
3651 /* Some ports use a loop to copy large arguments onto the stack.
3652 Don't move anything outside such a loop. */
3657 /* Write the remaining insns followed by the final copy. */
3658 for (insn
= insns
; insn
; insn
= next
)
3660 next
= NEXT_INSN (insn
);
3665 last
= emit_move_insn (target
, result
);
3667 set_dst_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
), target
);
3669 if (final_dest
!= target
)
3670 emit_move_insn (final_dest
, target
);
3674 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3676 emit_libcall_block_1 (safe_as_a
<rtx_insn
*> (insns
),
3677 target
, result
, equiv
, false);
3680 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3681 PURPOSE describes how this comparison will be used. CODE is the rtx
3682 comparison code we will be using.
3684 ??? Actually, CODE is slightly weaker than that. A target is still
3685 required to implement all of the normal bcc operations, but not
3686 required to implement all (or any) of the unordered bcc operations. */
3689 can_compare_p (enum rtx_code code
, machine_mode mode
,
3690 enum can_compare_purpose purpose
)
3693 test
= gen_rtx_fmt_ee (code
, mode
, const0_rtx
, const0_rtx
);
3696 enum insn_code icode
;
3698 if (purpose
== ccp_jump
3699 && (icode
= optab_handler (cbranch_optab
, mode
)) != CODE_FOR_nothing
3700 && insn_operand_matches (icode
, 0, test
))
3702 if (purpose
== ccp_store_flag
3703 && (icode
= optab_handler (cstore_optab
, mode
)) != CODE_FOR_nothing
3704 && insn_operand_matches (icode
, 1, test
))
3706 if (purpose
== ccp_cmov
3707 && optab_handler (cmov_optab
, mode
) != CODE_FOR_nothing
)
3710 mode
= GET_MODE_WIDER_MODE (mode
);
3711 PUT_MODE (test
, mode
);
3713 while (mode
!= VOIDmode
);
3718 /* This function is called when we are going to emit a compare instruction that
3719 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3721 *PMODE is the mode of the inputs (in case they are const_int).
3722 *PUNSIGNEDP nonzero says that the operands are unsigned;
3723 this matters if they need to be widened (as given by METHODS).
3725 If they have mode BLKmode, then SIZE specifies the size of both operands.
3727 This function performs all the setup necessary so that the caller only has
3728 to emit a single comparison insn. This setup can involve doing a BLKmode
3729 comparison or emitting a library call to perform the comparison if no insn
3730 is available to handle it.
3731 The values which are passed in through pointers can be modified; the caller
3732 should perform the comparison on the modified values. Constant
3733 comparisons must have already been folded. */
3736 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3737 int unsignedp
, enum optab_methods methods
,
3738 rtx
*ptest
, machine_mode
*pmode
)
3740 machine_mode mode
= *pmode
;
3742 machine_mode cmp_mode
;
3743 enum mode_class mclass
;
3745 /* The other methods are not needed. */
3746 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
3747 || methods
== OPTAB_LIB_WIDEN
);
3749 /* If we are optimizing, force expensive constants into a register. */
3750 if (CONSTANT_P (x
) && optimize
3751 && (rtx_cost (x
, mode
, COMPARE
, 0, optimize_insn_for_speed_p ())
3752 > COSTS_N_INSNS (1)))
3753 x
= force_reg (mode
, x
);
3755 if (CONSTANT_P (y
) && optimize
3756 && (rtx_cost (y
, mode
, COMPARE
, 1, optimize_insn_for_speed_p ())
3757 > COSTS_N_INSNS (1)))
3758 y
= force_reg (mode
, y
);
3761 /* Make sure if we have a canonical comparison. The RTL
3762 documentation states that canonical comparisons are required only
3763 for targets which have cc0. */
3764 gcc_assert (!CONSTANT_P (x
) || CONSTANT_P (y
));
3767 /* Don't let both operands fail to indicate the mode. */
3768 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3769 x
= force_reg (mode
, x
);
3770 if (mode
== VOIDmode
)
3771 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
3773 /* Handle all BLKmode compares. */
3775 if (mode
== BLKmode
)
3777 machine_mode result_mode
;
3778 enum insn_code cmp_code
;
3781 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3785 /* Try to use a memory block compare insn - either cmpstr
3786 or cmpmem will do. */
3787 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3788 cmp_mode
!= VOIDmode
;
3789 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3791 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
3792 if (cmp_code
== CODE_FOR_nothing
)
3793 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
3794 if (cmp_code
== CODE_FOR_nothing
)
3795 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
3796 if (cmp_code
== CODE_FOR_nothing
)
3799 /* Must make sure the size fits the insn's mode. */
3800 if ((CONST_INT_P (size
)
3801 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3802 || (GET_MODE_BITSIZE (GET_MODE (size
))
3803 > GET_MODE_BITSIZE (cmp_mode
)))
3806 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3807 result
= gen_reg_rtx (result_mode
);
3808 size
= convert_to_mode (cmp_mode
, size
, 1);
3809 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3811 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
3812 *pmode
= result_mode
;
3816 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
3819 /* Otherwise call a library function. */
3820 result
= emit_block_comp_via_libcall (XEXP (x
, 0), XEXP (y
, 0), size
);
3824 mode
= TYPE_MODE (integer_type_node
);
3825 methods
= OPTAB_LIB_WIDEN
;
3829 /* Don't allow operands to the compare to trap, as that can put the
3830 compare and branch in different basic blocks. */
3831 if (cfun
->can_throw_non_call_exceptions
)
3834 x
= force_reg (mode
, x
);
3836 y
= force_reg (mode
, y
);
3839 if (GET_MODE_CLASS (mode
) == MODE_CC
)
3841 enum insn_code icode
= optab_handler (cbranch_optab
, CCmode
);
3842 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
3843 gcc_assert (icode
!= CODE_FOR_nothing
3844 && insn_operand_matches (icode
, 0, test
));
3849 mclass
= GET_MODE_CLASS (mode
);
3850 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
3854 enum insn_code icode
;
3855 icode
= optab_handler (cbranch_optab
, cmp_mode
);
3856 if (icode
!= CODE_FOR_nothing
3857 && insn_operand_matches (icode
, 0, test
))
3859 rtx_insn
*last
= get_last_insn ();
3860 rtx op0
= prepare_operand (icode
, x
, 1, mode
, cmp_mode
, unsignedp
);
3861 rtx op1
= prepare_operand (icode
, y
, 2, mode
, cmp_mode
, unsignedp
);
3863 && insn_operand_matches (icode
, 1, op0
)
3864 && insn_operand_matches (icode
, 2, op1
))
3866 XEXP (test
, 0) = op0
;
3867 XEXP (test
, 1) = op1
;
3872 delete_insns_since (last
);
3875 if (methods
== OPTAB_DIRECT
|| !CLASS_HAS_WIDER_MODES_P (mclass
))
3877 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
);
3879 while (cmp_mode
!= VOIDmode
);
3881 if (methods
!= OPTAB_LIB_WIDEN
)
3884 if (!SCALAR_FLOAT_MODE_P (mode
))
3887 machine_mode ret_mode
;
3889 /* Handle a libcall just for the mode we are using. */
3890 libfunc
= optab_libfunc (cmp_optab
, mode
);
3891 gcc_assert (libfunc
);
3893 /* If we want unsigned, and this mode has a distinct unsigned
3894 comparison routine, use that. */
3897 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
3902 ret_mode
= targetm
.libgcc_cmp_return_mode ();
3903 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3904 ret_mode
, 2, x
, mode
, y
, mode
);
3906 /* There are two kinds of comparison routines. Biased routines
3907 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3908 of gcc expect that the comparison operation is equivalent
3909 to the modified comparison. For signed comparisons compare the
3910 result against 1 in the biased case, and zero in the unbiased
3911 case. For unsigned comparisons always compare against 1 after
3912 biasing the unbiased result by adding 1. This gives us a way to
3914 The comparisons in the fixed-point helper library are always
3919 if (!TARGET_LIB_INT_CMP_BIASED
&& !ALL_FIXED_POINT_MODE_P (mode
))
3922 x
= plus_constant (ret_mode
, result
, 1);
3928 prepare_cmp_insn (x
, y
, comparison
, NULL_RTX
, unsignedp
, methods
,
3932 prepare_float_lib_cmp (x
, y
, comparison
, ptest
, pmode
);
3940 /* Before emitting an insn with code ICODE, make sure that X, which is going
3941 to be used for operand OPNUM of the insn, is converted from mode MODE to
3942 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3943 that it is accepted by the operand predicate. Return the new value. */
3946 prepare_operand (enum insn_code icode
, rtx x
, int opnum
, machine_mode mode
,
3947 machine_mode wider_mode
, int unsignedp
)
3949 if (mode
!= wider_mode
)
3950 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3952 if (!insn_operand_matches (icode
, opnum
, x
))
3954 machine_mode op_mode
= insn_data
[(int) icode
].operand
[opnum
].mode
;
3955 if (reload_completed
)
3957 if (GET_MODE (x
) != op_mode
&& GET_MODE (x
) != VOIDmode
)
3959 x
= copy_to_mode_reg (op_mode
, x
);
3965 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3966 we can do the branch. */
3969 emit_cmp_and_jump_insn_1 (rtx test
, machine_mode mode
, rtx label
, int prob
)
3971 machine_mode optab_mode
;
3972 enum mode_class mclass
;
3973 enum insn_code icode
;
3976 mclass
= GET_MODE_CLASS (mode
);
3977 optab_mode
= (mclass
== MODE_CC
) ? CCmode
: mode
;
3978 icode
= optab_handler (cbranch_optab
, optab_mode
);
3980 gcc_assert (icode
!= CODE_FOR_nothing
);
3981 gcc_assert (insn_operand_matches (icode
, 0, test
));
3982 insn
= emit_jump_insn (GEN_FCN (icode
) (test
, XEXP (test
, 0),
3983 XEXP (test
, 1), label
));
3985 && profile_status_for_fn (cfun
) != PROFILE_ABSENT
3988 && any_condjump_p (insn
)
3989 && !find_reg_note (insn
, REG_BR_PROB
, 0))
3990 add_int_reg_note (insn
, REG_BR_PROB
, prob
);
3993 /* Generate code to compare X with Y so that the condition codes are
3994 set and to jump to LABEL if the condition is true. If X is a
3995 constant and Y is not a constant, then the comparison is swapped to
3996 ensure that the comparison RTL has the canonical form.
3998 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3999 need to be widened. UNSIGNEDP is also used to select the proper
4000 branch condition code.
4002 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4004 MODE is the mode of the inputs (in case they are const_int).
4006 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4007 It will be potentially converted into an unsigned variant based on
4008 UNSIGNEDP to select a proper jump instruction.
4010 PROB is the probability of jumping to LABEL. */
4013 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4014 machine_mode mode
, int unsignedp
, rtx label
,
4017 rtx op0
= x
, op1
= y
;
4020 /* Swap operands and condition to ensure canonical RTL. */
4021 if (swap_commutative_operands_p (x
, y
)
4022 && can_compare_p (swap_condition (comparison
), mode
, ccp_jump
))
4025 comparison
= swap_condition (comparison
);
4028 /* If OP0 is still a constant, then both X and Y must be constants
4029 or the opposite comparison is not supported. Force X into a register
4030 to create canonical RTL. */
4031 if (CONSTANT_P (op0
))
4032 op0
= force_reg (mode
, op0
);
4035 comparison
= unsigned_condition (comparison
);
4037 prepare_cmp_insn (op0
, op1
, comparison
, size
, unsignedp
, OPTAB_LIB_WIDEN
,
4039 emit_cmp_and_jump_insn_1 (test
, mode
, label
, prob
);
4043 /* Emit a library call comparison between floating point X and Y.
4044 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4047 prepare_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
,
4048 rtx
*ptest
, machine_mode
*pmode
)
4050 enum rtx_code swapped
= swap_condition (comparison
);
4051 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4052 machine_mode orig_mode
= GET_MODE (x
);
4053 machine_mode mode
, cmp_mode
;
4054 rtx true_rtx
, false_rtx
;
4055 rtx value
, target
, equiv
;
4058 bool reversed_p
= false;
4059 cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4061 for (mode
= orig_mode
;
4063 mode
= GET_MODE_WIDER_MODE (mode
))
4065 if (code_to_optab (comparison
)
4066 && (libfunc
= optab_libfunc (code_to_optab (comparison
), mode
)))
4069 if (code_to_optab (swapped
)
4070 && (libfunc
= optab_libfunc (code_to_optab (swapped
), mode
)))
4073 comparison
= swapped
;
4077 if (code_to_optab (reversed
)
4078 && (libfunc
= optab_libfunc (code_to_optab (reversed
), mode
)))
4080 comparison
= reversed
;
4086 gcc_assert (mode
!= VOIDmode
);
4088 if (mode
!= orig_mode
)
4090 x
= convert_to_mode (mode
, x
, 0);
4091 y
= convert_to_mode (mode
, y
, 0);
4094 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4095 the RTL. The allows the RTL optimizers to delete the libcall if the
4096 condition can be determined at compile-time. */
4097 if (comparison
== UNORDERED
4098 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4100 true_rtx
= const_true_rtx
;
4101 false_rtx
= const0_rtx
;
4108 true_rtx
= const0_rtx
;
4109 false_rtx
= const_true_rtx
;
4113 true_rtx
= const_true_rtx
;
4114 false_rtx
= const0_rtx
;
4118 true_rtx
= const1_rtx
;
4119 false_rtx
= const0_rtx
;
4123 true_rtx
= const0_rtx
;
4124 false_rtx
= constm1_rtx
;
4128 true_rtx
= constm1_rtx
;
4129 false_rtx
= const0_rtx
;
4133 true_rtx
= const0_rtx
;
4134 false_rtx
= const1_rtx
;
4142 if (comparison
== UNORDERED
)
4144 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4145 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4146 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4147 temp
, const_true_rtx
, equiv
);
4151 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4152 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4153 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4154 equiv
, true_rtx
, false_rtx
);
4158 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4159 cmp_mode
, 2, x
, mode
, y
, mode
);
4160 insns
= get_insns ();
4163 target
= gen_reg_rtx (cmp_mode
);
4164 emit_libcall_block (insns
, target
, value
, equiv
);
4166 if (comparison
== UNORDERED
4167 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
)
4169 *ptest
= gen_rtx_fmt_ee (reversed_p
? EQ
: NE
, VOIDmode
, target
, false_rtx
);
4171 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, target
, const0_rtx
);
4176 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4179 emit_indirect_jump (rtx loc
)
4181 if (!targetm
.have_indirect_jump ())
4182 sorry ("indirect jumps are not available on this target");
4185 struct expand_operand ops
[1];
4186 create_address_operand (&ops
[0], loc
);
4187 expand_jump_insn (targetm
.code_for_indirect_jump
, 1, ops
);
4193 /* Emit a conditional move instruction if the machine supports one for that
4194 condition and machine mode.
4196 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4197 the mode to use should they be constants. If it is VOIDmode, they cannot
4200 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4201 should be stored there. MODE is the mode to use should they be constants.
4202 If it is VOIDmode, they cannot both be constants.
4204 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4205 is not supported. */
4208 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4209 machine_mode cmode
, rtx op2
, rtx op3
,
4210 machine_mode mode
, int unsignedp
)
4214 enum insn_code icode
;
4215 enum rtx_code reversed
;
4217 /* If the two source operands are identical, that's just a move. */
4219 if (rtx_equal_p (op2
, op3
))
4222 target
= gen_reg_rtx (mode
);
4224 emit_move_insn (target
, op3
);
4228 /* If one operand is constant, make it the second one. Only do this
4229 if the other operand is not constant as well. */
4231 if (swap_commutative_operands_p (op0
, op1
))
4233 std::swap (op0
, op1
);
4234 code
= swap_condition (code
);
4237 /* get_condition will prefer to generate LT and GT even if the old
4238 comparison was against zero, so undo that canonicalization here since
4239 comparisons against zero are cheaper. */
4240 if (code
== LT
&& op1
== const1_rtx
)
4241 code
= LE
, op1
= const0_rtx
;
4242 else if (code
== GT
&& op1
== constm1_rtx
)
4243 code
= GE
, op1
= const0_rtx
;
4245 if (cmode
== VOIDmode
)
4246 cmode
= GET_MODE (op0
);
4248 if (swap_commutative_operands_p (op2
, op3
)
4249 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4252 std::swap (op2
, op3
);
4256 if (mode
== VOIDmode
)
4257 mode
= GET_MODE (op2
);
4259 icode
= direct_optab_handler (movcc_optab
, mode
);
4261 if (icode
== CODE_FOR_nothing
)
4265 target
= gen_reg_rtx (mode
);
4267 code
= unsignedp
? unsigned_condition (code
) : code
;
4268 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4270 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4271 return NULL and let the caller figure out how best to deal with this
4273 if (!COMPARISON_P (comparison
))
4276 saved_pending_stack_adjust save
;
4277 save_pending_stack_adjust (&save
);
4278 last
= get_last_insn ();
4279 do_pending_stack_adjust ();
4280 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4281 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4282 &comparison
, &cmode
);
4285 struct expand_operand ops
[4];
4287 create_output_operand (&ops
[0], target
, mode
);
4288 create_fixed_operand (&ops
[1], comparison
);
4289 create_input_operand (&ops
[2], op2
, mode
);
4290 create_input_operand (&ops
[3], op3
, mode
);
4291 if (maybe_expand_insn (icode
, 4, ops
))
4293 if (ops
[0].value
!= target
)
4294 convert_move (target
, ops
[0].value
, false);
4298 delete_insns_since (last
);
4299 restore_pending_stack_adjust (&save
);
4304 /* Emit a conditional negate or bitwise complement using the
4305 negcc or notcc optabs if available. Return NULL_RTX if such operations
4306 are not available. Otherwise return the RTX holding the result.
4307 TARGET is the desired destination of the result. COMP is the comparison
4308 on which to negate. If COND is true move into TARGET the negation
4309 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4310 CODE is either NEG or NOT. MODE is the machine mode in which the
4311 operation is performed. */
4314 emit_conditional_neg_or_complement (rtx target
, rtx_code code
,
4315 machine_mode mode
, rtx cond
, rtx op1
,
4318 optab op
= unknown_optab
;
4321 else if (code
== NOT
)
4326 insn_code icode
= direct_optab_handler (op
, mode
);
4328 if (icode
== CODE_FOR_nothing
)
4332 target
= gen_reg_rtx (mode
);
4334 rtx_insn
*last
= get_last_insn ();
4335 struct expand_operand ops
[4];
4337 create_output_operand (&ops
[0], target
, mode
);
4338 create_fixed_operand (&ops
[1], cond
);
4339 create_input_operand (&ops
[2], op1
, mode
);
4340 create_input_operand (&ops
[3], op2
, mode
);
4342 if (maybe_expand_insn (icode
, 4, ops
))
4344 if (ops
[0].value
!= target
)
4345 convert_move (target
, ops
[0].value
, false);
4349 delete_insns_since (last
);
4353 /* Emit a conditional addition instruction if the machine supports one for that
4354 condition and machine mode.
4356 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4357 the mode to use should they be constants. If it is VOIDmode, they cannot
4360 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4361 should be stored there. MODE is the mode to use should they be constants.
4362 If it is VOIDmode, they cannot both be constants.
4364 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4365 is not supported. */
4368 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4369 machine_mode cmode
, rtx op2
, rtx op3
,
4370 machine_mode mode
, int unsignedp
)
4374 enum insn_code icode
;
4376 /* If one operand is constant, make it the second one. Only do this
4377 if the other operand is not constant as well. */
4379 if (swap_commutative_operands_p (op0
, op1
))
4381 std::swap (op0
, op1
);
4382 code
= swap_condition (code
);
4385 /* get_condition will prefer to generate LT and GT even if the old
4386 comparison was against zero, so undo that canonicalization here since
4387 comparisons against zero are cheaper. */
4388 if (code
== LT
&& op1
== const1_rtx
)
4389 code
= LE
, op1
= const0_rtx
;
4390 else if (code
== GT
&& op1
== constm1_rtx
)
4391 code
= GE
, op1
= const0_rtx
;
4393 if (cmode
== VOIDmode
)
4394 cmode
= GET_MODE (op0
);
4396 if (mode
== VOIDmode
)
4397 mode
= GET_MODE (op2
);
4399 icode
= optab_handler (addcc_optab
, mode
);
4401 if (icode
== CODE_FOR_nothing
)
4405 target
= gen_reg_rtx (mode
);
4407 code
= unsignedp
? unsigned_condition (code
) : code
;
4408 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4410 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4411 return NULL and let the caller figure out how best to deal with this
4413 if (!COMPARISON_P (comparison
))
4416 do_pending_stack_adjust ();
4417 last
= get_last_insn ();
4418 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4419 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4420 &comparison
, &cmode
);
4423 struct expand_operand ops
[4];
4425 create_output_operand (&ops
[0], target
, mode
);
4426 create_fixed_operand (&ops
[1], comparison
);
4427 create_input_operand (&ops
[2], op2
, mode
);
4428 create_input_operand (&ops
[3], op3
, mode
);
4429 if (maybe_expand_insn (icode
, 4, ops
))
4431 if (ops
[0].value
!= target
)
4432 convert_move (target
, ops
[0].value
, false);
4436 delete_insns_since (last
);
4440 /* These functions attempt to generate an insn body, rather than
4441 emitting the insn, but if the gen function already emits them, we
4442 make no attempt to turn them back into naked patterns. */
4444 /* Generate and return an insn body to add Y to X. */
4447 gen_add2_insn (rtx x
, rtx y
)
4449 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (x
));
4451 gcc_assert (insn_operand_matches (icode
, 0, x
));
4452 gcc_assert (insn_operand_matches (icode
, 1, x
));
4453 gcc_assert (insn_operand_matches (icode
, 2, y
));
4455 return GEN_FCN (icode
) (x
, x
, y
);
4458 /* Generate and return an insn body to add r1 and c,
4459 storing the result in r0. */
4462 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4464 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (r0
));
4466 if (icode
== CODE_FOR_nothing
4467 || !insn_operand_matches (icode
, 0, r0
)
4468 || !insn_operand_matches (icode
, 1, r1
)
4469 || !insn_operand_matches (icode
, 2, c
))
4472 return GEN_FCN (icode
) (r0
, r1
, c
);
4476 have_add2_insn (rtx x
, rtx y
)
4478 enum insn_code icode
;
4480 gcc_assert (GET_MODE (x
) != VOIDmode
);
4482 icode
= optab_handler (add_optab
, GET_MODE (x
));
4484 if (icode
== CODE_FOR_nothing
)
4487 if (!insn_operand_matches (icode
, 0, x
)
4488 || !insn_operand_matches (icode
, 1, x
)
4489 || !insn_operand_matches (icode
, 2, y
))
4495 /* Generate and return an insn body to add Y to X. */
4498 gen_addptr3_insn (rtx x
, rtx y
, rtx z
)
4500 enum insn_code icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
4502 gcc_assert (insn_operand_matches (icode
, 0, x
));
4503 gcc_assert (insn_operand_matches (icode
, 1, y
));
4504 gcc_assert (insn_operand_matches (icode
, 2, z
));
4506 return GEN_FCN (icode
) (x
, y
, z
);
4509 /* Return true if the target implements an addptr pattern and X, Y,
4510 and Z are valid for the pattern predicates. */
4513 have_addptr3_insn (rtx x
, rtx y
, rtx z
)
4515 enum insn_code icode
;
4517 gcc_assert (GET_MODE (x
) != VOIDmode
);
4519 icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
4521 if (icode
== CODE_FOR_nothing
)
4524 if (!insn_operand_matches (icode
, 0, x
)
4525 || !insn_operand_matches (icode
, 1, y
)
4526 || !insn_operand_matches (icode
, 2, z
))
4532 /* Generate and return an insn body to subtract Y from X. */
4535 gen_sub2_insn (rtx x
, rtx y
)
4537 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (x
));
4539 gcc_assert (insn_operand_matches (icode
, 0, x
));
4540 gcc_assert (insn_operand_matches (icode
, 1, x
));
4541 gcc_assert (insn_operand_matches (icode
, 2, y
));
4543 return GEN_FCN (icode
) (x
, x
, y
);
4546 /* Generate and return an insn body to subtract r1 and c,
4547 storing the result in r0. */
4550 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4552 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (r0
));
4554 if (icode
== CODE_FOR_nothing
4555 || !insn_operand_matches (icode
, 0, r0
)
4556 || !insn_operand_matches (icode
, 1, r1
)
4557 || !insn_operand_matches (icode
, 2, c
))
4560 return GEN_FCN (icode
) (r0
, r1
, c
);
4564 have_sub2_insn (rtx x
, rtx y
)
4566 enum insn_code icode
;
4568 gcc_assert (GET_MODE (x
) != VOIDmode
);
4570 icode
= optab_handler (sub_optab
, GET_MODE (x
));
4572 if (icode
== CODE_FOR_nothing
)
4575 if (!insn_operand_matches (icode
, 0, x
)
4576 || !insn_operand_matches (icode
, 1, x
)
4577 || !insn_operand_matches (icode
, 2, y
))
4583 /* Generate the body of an insn to extend Y (with mode MFROM)
4584 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4587 gen_extend_insn (rtx x
, rtx y
, machine_mode mto
,
4588 machine_mode mfrom
, int unsignedp
)
4590 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4591 return GEN_FCN (icode
) (x
, y
);
4594 /* Generate code to convert FROM to floating point
4595 and store in TO. FROM must be fixed point and not VOIDmode.
4596 UNSIGNEDP nonzero means regard FROM as unsigned.
4597 Normally this is done by correcting the final value
4598 if it is negative. */
4601 expand_float (rtx to
, rtx from
, int unsignedp
)
4603 enum insn_code icode
;
4605 machine_mode fmode
, imode
;
4606 bool can_do_signed
= false;
4608 /* Crash now, because we won't be able to decide which mode to use. */
4609 gcc_assert (GET_MODE (from
) != VOIDmode
);
4611 /* Look for an insn to do the conversion. Do it in the specified
4612 modes if possible; otherwise convert either input, output or both to
4613 wider mode. If the integer mode is wider than the mode of FROM,
4614 we can do the conversion signed even if the input is unsigned. */
4616 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4617 fmode
= GET_MODE_WIDER_MODE (fmode
))
4618 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4619 imode
= GET_MODE_WIDER_MODE (imode
))
4621 int doing_unsigned
= unsignedp
;
4623 if (fmode
!= GET_MODE (to
)
4624 && significand_size (fmode
) < GET_MODE_PRECISION (GET_MODE (from
)))
4627 icode
= can_float_p (fmode
, imode
, unsignedp
);
4628 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4630 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4631 if (scode
!= CODE_FOR_nothing
)
4632 can_do_signed
= true;
4633 if (imode
!= GET_MODE (from
))
4634 icode
= scode
, doing_unsigned
= 0;
4637 if (icode
!= CODE_FOR_nothing
)
4639 if (imode
!= GET_MODE (from
))
4640 from
= convert_to_mode (imode
, from
, unsignedp
);
4642 if (fmode
!= GET_MODE (to
))
4643 target
= gen_reg_rtx (fmode
);
4645 emit_unop_insn (icode
, target
, from
,
4646 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4649 convert_move (to
, target
, 0);
4654 /* Unsigned integer, and no way to convert directly. Convert as signed,
4655 then unconditionally adjust the result. */
4656 if (unsignedp
&& can_do_signed
)
4658 rtx_code_label
*label
= gen_label_rtx ();
4660 REAL_VALUE_TYPE offset
;
4662 /* Look for a usable floating mode FMODE wider than the source and at
4663 least as wide as the target. Using FMODE will avoid rounding woes
4664 with unsigned values greater than the signed maximum value. */
4666 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4667 fmode
= GET_MODE_WIDER_MODE (fmode
))
4668 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4669 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4672 if (fmode
== VOIDmode
)
4674 /* There is no such mode. Pretend the target is wide enough. */
4675 fmode
= GET_MODE (to
);
4677 /* Avoid double-rounding when TO is narrower than FROM. */
4678 if ((significand_size (fmode
) + 1)
4679 < GET_MODE_PRECISION (GET_MODE (from
)))
4682 rtx_code_label
*neglabel
= gen_label_rtx ();
4684 /* Don't use TARGET if it isn't a register, is a hard register,
4685 or is the wrong mode. */
4687 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4688 || GET_MODE (target
) != fmode
)
4689 target
= gen_reg_rtx (fmode
);
4691 imode
= GET_MODE (from
);
4692 do_pending_stack_adjust ();
4694 /* Test whether the sign bit is set. */
4695 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4698 /* The sign bit is not set. Convert as signed. */
4699 expand_float (target
, from
, 0);
4700 emit_jump_insn (targetm
.gen_jump (label
));
4703 /* The sign bit is set.
4704 Convert to a usable (positive signed) value by shifting right
4705 one bit, while remembering if a nonzero bit was shifted
4706 out; i.e., compute (from & 1) | (from >> 1). */
4708 emit_label (neglabel
);
4709 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4710 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4711 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
4712 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4714 expand_float (target
, temp
, 0);
4716 /* Multiply by 2 to undo the shift above. */
4717 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4718 target
, 0, OPTAB_LIB_WIDEN
);
4720 emit_move_insn (target
, temp
);
4722 do_pending_stack_adjust ();
4728 /* If we are about to do some arithmetic to correct for an
4729 unsigned operand, do it in a pseudo-register. */
4731 if (GET_MODE (to
) != fmode
4732 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4733 target
= gen_reg_rtx (fmode
);
4735 /* Convert as signed integer to floating. */
4736 expand_float (target
, from
, 0);
4738 /* If FROM is negative (and therefore TO is negative),
4739 correct its value by 2**bitwidth. */
4741 do_pending_stack_adjust ();
4742 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4746 real_2expN (&offset
, GET_MODE_PRECISION (GET_MODE (from
)), fmode
);
4747 temp
= expand_binop (fmode
, add_optab
, target
,
4748 const_double_from_real_value (offset
, fmode
),
4749 target
, 0, OPTAB_LIB_WIDEN
);
4751 emit_move_insn (target
, temp
);
4753 do_pending_stack_adjust ();
4758 /* No hardware instruction available; call a library routine. */
4763 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4765 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_PRECISION (SImode
))
4766 from
= convert_to_mode (SImode
, from
, unsignedp
);
4768 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
4769 gcc_assert (libfunc
);
4773 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4774 GET_MODE (to
), 1, from
,
4776 insns
= get_insns ();
4779 emit_libcall_block (insns
, target
, value
,
4780 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
4781 GET_MODE (to
), from
));
4786 /* Copy result to requested destination
4787 if we have been computing in a temp location. */
4791 if (GET_MODE (target
) == GET_MODE (to
))
4792 emit_move_insn (to
, target
);
4794 convert_move (to
, target
, 0);
4798 /* Generate code to convert FROM to fixed point and store in TO. FROM
4799 must be floating point. */
4802 expand_fix (rtx to
, rtx from
, int unsignedp
)
4804 enum insn_code icode
;
4806 machine_mode fmode
, imode
;
4807 bool must_trunc
= false;
4809 /* We first try to find a pair of modes, one real and one integer, at
4810 least as wide as FROM and TO, respectively, in which we can open-code
4811 this conversion. If the integer mode is wider than the mode of TO,
4812 we can do the conversion either signed or unsigned. */
4814 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4815 fmode
= GET_MODE_WIDER_MODE (fmode
))
4816 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4817 imode
= GET_MODE_WIDER_MODE (imode
))
4819 int doing_unsigned
= unsignedp
;
4821 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4822 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4823 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4825 if (icode
!= CODE_FOR_nothing
)
4827 rtx_insn
*last
= get_last_insn ();
4828 if (fmode
!= GET_MODE (from
))
4829 from
= convert_to_mode (fmode
, from
, 0);
4833 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4834 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4838 if (imode
!= GET_MODE (to
))
4839 target
= gen_reg_rtx (imode
);
4841 if (maybe_emit_unop_insn (icode
, target
, from
,
4842 doing_unsigned
? UNSIGNED_FIX
: FIX
))
4845 convert_move (to
, target
, unsignedp
);
4848 delete_insns_since (last
);
4852 /* For an unsigned conversion, there is one more way to do it.
4853 If we have a signed conversion, we generate code that compares
4854 the real value to the largest representable positive number. If if
4855 is smaller, the conversion is done normally. Otherwise, subtract
4856 one plus the highest signed number, convert, and add it back.
4858 We only need to check all real modes, since we know we didn't find
4859 anything with a wider integer mode.
4861 This code used to extend FP value into mode wider than the destination.
4862 This is needed for decimal float modes which cannot accurately
4863 represent one plus the highest signed number of the same size, but
4864 not for binary modes. Consider, for instance conversion from SFmode
4867 The hot path through the code is dealing with inputs smaller than 2^63
4868 and doing just the conversion, so there is no bits to lose.
4870 In the other path we know the value is positive in the range 2^63..2^64-1
4871 inclusive. (as for other input overflow happens and result is undefined)
4872 So we know that the most important bit set in mantissa corresponds to
4873 2^63. The subtraction of 2^63 should not generate any rounding as it
4874 simply clears out that bit. The rest is trivial. */
4876 if (unsignedp
&& GET_MODE_PRECISION (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4877 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4878 fmode
= GET_MODE_WIDER_MODE (fmode
))
4879 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
4880 && (!DECIMAL_FLOAT_MODE_P (fmode
)
4881 || GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (GET_MODE (to
))))
4884 REAL_VALUE_TYPE offset
;
4886 rtx_code_label
*lab1
, *lab2
;
4889 bitsize
= GET_MODE_PRECISION (GET_MODE (to
));
4890 real_2expN (&offset
, bitsize
- 1, fmode
);
4891 limit
= const_double_from_real_value (offset
, fmode
);
4892 lab1
= gen_label_rtx ();
4893 lab2
= gen_label_rtx ();
4895 if (fmode
!= GET_MODE (from
))
4896 from
= convert_to_mode (fmode
, from
, 0);
4898 /* See if we need to do the subtraction. */
4899 do_pending_stack_adjust ();
4900 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4903 /* If not, do the signed "fix" and branch around fixup code. */
4904 expand_fix (to
, from
, 0);
4905 emit_jump_insn (targetm
.gen_jump (lab2
));
4908 /* Otherwise, subtract 2**(N-1), convert to signed number,
4909 then add 2**(N-1). Do the addition using XOR since this
4910 will often generate better code. */
4912 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4913 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4914 expand_fix (to
, target
, 0);
4915 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4917 (HOST_WIDE_INT_1
<< (bitsize
- 1),
4919 to
, 1, OPTAB_LIB_WIDEN
);
4922 emit_move_insn (to
, target
);
4926 if (optab_handler (mov_optab
, GET_MODE (to
)) != CODE_FOR_nothing
)
4928 /* Make a place for a REG_NOTE and add it. */
4929 insn
= emit_move_insn (to
, to
);
4930 set_dst_reg_note (insn
, REG_EQUAL
,
4931 gen_rtx_fmt_e (UNSIGNED_FIX
, GET_MODE (to
),
4939 /* We can't do it with an insn, so use a library call. But first ensure
4940 that the mode of TO is at least as wide as SImode, since those are the
4941 only library calls we know about. */
4943 if (GET_MODE_PRECISION (GET_MODE (to
)) < GET_MODE_PRECISION (SImode
))
4945 target
= gen_reg_rtx (SImode
);
4947 expand_fix (target
, from
, unsignedp
);
4955 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4956 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
4957 gcc_assert (libfunc
);
4961 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4962 GET_MODE (to
), 1, from
,
4964 insns
= get_insns ();
4967 emit_libcall_block (insns
, target
, value
,
4968 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4969 GET_MODE (to
), from
));
4974 if (GET_MODE (to
) == GET_MODE (target
))
4975 emit_move_insn (to
, target
);
4977 convert_move (to
, target
, 0);
4982 /* Promote integer arguments for a libcall if necessary.
4983 emit_library_call_value cannot do the promotion because it does not
4984 know if it should do a signed or unsigned promotion. This is because
4985 there are no tree types defined for libcalls. */
4988 prepare_libcall_arg (rtx arg
, int uintp
)
4990 machine_mode mode
= GET_MODE (arg
);
4991 machine_mode arg_mode
;
4992 if (SCALAR_INT_MODE_P (mode
))
4994 /* If we need to promote the integer function argument we need to do
4995 it here instead of inside emit_library_call_value because in
4996 emit_library_call_value we don't know if we should do a signed or
4997 unsigned promotion. */
5000 arg_mode
= promote_function_mode (NULL_TREE
, mode
,
5001 &unsigned_p
, NULL_TREE
, 0);
5002 if (arg_mode
!= mode
)
5003 return convert_to_mode (arg_mode
, arg
, uintp
);
5008 /* Generate code to convert FROM or TO a fixed-point.
5009 If UINTP is true, either TO or FROM is an unsigned integer.
5010 If SATP is true, we need to saturate the result. */
5013 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5015 machine_mode to_mode
= GET_MODE (to
);
5016 machine_mode from_mode
= GET_MODE (from
);
5018 enum rtx_code this_code
;
5019 enum insn_code code
;
5024 if (to_mode
== from_mode
)
5026 emit_move_insn (to
, from
);
5032 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5033 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5037 tab
= satp
? satfract_optab
: fract_optab
;
5038 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5040 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
5041 if (code
!= CODE_FOR_nothing
)
5043 emit_unop_insn (code
, to
, from
, this_code
);
5047 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5048 gcc_assert (libfunc
);
5050 from
= prepare_libcall_arg (from
, uintp
);
5051 from_mode
= GET_MODE (from
);
5054 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5055 1, from
, from_mode
);
5056 insns
= get_insns ();
5059 emit_libcall_block (insns
, to
, value
,
5060 gen_rtx_fmt_e (optab_to_code (tab
), to_mode
, from
));
5063 /* Generate code to convert FROM to fixed point and store in TO. FROM
5064 must be floating point, TO must be signed. Use the conversion optab
5065 TAB to do the conversion. */
5068 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5070 enum insn_code icode
;
5072 machine_mode fmode
, imode
;
5074 /* We first try to find a pair of modes, one real and one integer, at
5075 least as wide as FROM and TO, respectively, in which we can open-code
5076 this conversion. If the integer mode is wider than the mode of TO,
5077 we can do the conversion either signed or unsigned. */
5079 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5080 fmode
= GET_MODE_WIDER_MODE (fmode
))
5081 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5082 imode
= GET_MODE_WIDER_MODE (imode
))
5084 icode
= convert_optab_handler (tab
, imode
, fmode
);
5085 if (icode
!= CODE_FOR_nothing
)
5087 rtx_insn
*last
= get_last_insn ();
5088 if (fmode
!= GET_MODE (from
))
5089 from
= convert_to_mode (fmode
, from
, 0);
5091 if (imode
!= GET_MODE (to
))
5092 target
= gen_reg_rtx (imode
);
5094 if (!maybe_emit_unop_insn (icode
, target
, from
, UNKNOWN
))
5096 delete_insns_since (last
);
5100 convert_move (to
, target
, 0);
5108 /* Report whether we have an instruction to perform the operation
5109 specified by CODE on operands of mode MODE. */
5111 have_insn_for (enum rtx_code code
, machine_mode mode
)
5113 return (code_to_optab (code
)
5114 && (optab_handler (code_to_optab (code
), mode
)
5115 != CODE_FOR_nothing
));
5118 /* Print information about the current contents of the optabs on
5122 debug_optab_libfuncs (void)
5126 /* Dump the arithmetic optabs. */
5127 for (i
= FIRST_NORM_OPTAB
; i
<= LAST_NORMLIB_OPTAB
; ++i
)
5128 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5130 rtx l
= optab_libfunc ((optab
) i
, (machine_mode
) j
);
5133 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5134 fprintf (stderr
, "%s\t%s:\t%s\n",
5135 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5141 /* Dump the conversion optabs. */
5142 for (i
= FIRST_CONV_OPTAB
; i
<= LAST_CONVLIB_OPTAB
; ++i
)
5143 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5144 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5146 rtx l
= convert_optab_libfunc ((optab
) i
, (machine_mode
) j
,
5150 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5151 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5152 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5160 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5161 CODE. Return 0 on failure. */
5164 gen_cond_trap (enum rtx_code code
, rtx op1
, rtx op2
, rtx tcode
)
5166 machine_mode mode
= GET_MODE (op1
);
5167 enum insn_code icode
;
5171 if (mode
== VOIDmode
)
5174 icode
= optab_handler (ctrap_optab
, mode
);
5175 if (icode
== CODE_FOR_nothing
)
5178 /* Some targets only accept a zero trap code. */
5179 if (!insn_operand_matches (icode
, 3, tcode
))
5182 do_pending_stack_adjust ();
5184 prepare_cmp_insn (op1
, op2
, code
, NULL_RTX
, false, OPTAB_DIRECT
,
5189 insn
= GEN_FCN (icode
) (trap_rtx
, XEXP (trap_rtx
, 0), XEXP (trap_rtx
, 1),
5192 /* If that failed, then give up. */
5200 insn
= get_insns ();
5205 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5206 or unsigned operation code. */
5209 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5221 code
= unsignedp
? LTU
: LT
;
5224 code
= unsignedp
? LEU
: LE
;
5227 code
= unsignedp
? GTU
: GT
;
5230 code
= unsignedp
? GEU
: GE
;
5233 case UNORDERED_EXPR
:
5272 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5273 unsigned operators. OPNO holds an index of the first comparison
5274 operand in insn with code ICODE. Do not generate compare instruction. */
5277 vector_compare_rtx (enum tree_code tcode
, tree t_op0
, tree t_op1
,
5278 bool unsignedp
, enum insn_code icode
,
5281 struct expand_operand ops
[2];
5282 rtx rtx_op0
, rtx_op1
;
5283 machine_mode m0
, m1
;
5284 enum rtx_code rcode
= get_rtx_code (tcode
, unsignedp
);
5286 gcc_assert (TREE_CODE_CLASS (tcode
) == tcc_comparison
);
5288 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5289 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5290 cases, use the original mode. */
5291 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)),
5293 m0
= GET_MODE (rtx_op0
);
5295 m0
= TYPE_MODE (TREE_TYPE (t_op0
));
5297 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)),
5299 m1
= GET_MODE (rtx_op1
);
5301 m1
= TYPE_MODE (TREE_TYPE (t_op1
));
5303 create_input_operand (&ops
[0], rtx_op0
, m0
);
5304 create_input_operand (&ops
[1], rtx_op1
, m1
);
5305 if (!maybe_legitimize_operands (icode
, opno
, 2, ops
))
5307 return gen_rtx_fmt_ee (rcode
, VOIDmode
, ops
[0].value
, ops
[1].value
);
5310 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5311 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5312 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5315 shift_amt_for_vec_perm_mask (rtx sel
)
5317 unsigned int i
, first
, nelt
= GET_MODE_NUNITS (GET_MODE (sel
));
5318 unsigned int bitsize
= GET_MODE_UNIT_BITSIZE (GET_MODE (sel
));
5320 if (GET_CODE (sel
) != CONST_VECTOR
)
5323 first
= INTVAL (CONST_VECTOR_ELT (sel
, 0));
5326 for (i
= 1; i
< nelt
; i
++)
5328 int idx
= INTVAL (CONST_VECTOR_ELT (sel
, i
));
5329 unsigned int expected
= i
+ first
;
5330 /* Indices into the second vector are all equivalent. */
5331 if (idx
< 0 || (MIN (nelt
, (unsigned) idx
) != MIN (nelt
, expected
)))
5335 return GEN_INT (first
* bitsize
);
5338 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5341 expand_vec_perm_1 (enum insn_code icode
, rtx target
,
5342 rtx v0
, rtx v1
, rtx sel
)
5344 machine_mode tmode
= GET_MODE (target
);
5345 machine_mode smode
= GET_MODE (sel
);
5346 struct expand_operand ops
[4];
5348 create_output_operand (&ops
[0], target
, tmode
);
5349 create_input_operand (&ops
[3], sel
, smode
);
5351 /* Make an effort to preserve v0 == v1. The target expander is able to
5352 rely on this to determine if we're permuting a single input operand. */
5353 if (rtx_equal_p (v0
, v1
))
5355 if (!insn_operand_matches (icode
, 1, v0
))
5356 v0
= force_reg (tmode
, v0
);
5357 gcc_checking_assert (insn_operand_matches (icode
, 1, v0
));
5358 gcc_checking_assert (insn_operand_matches (icode
, 2, v0
));
5360 create_fixed_operand (&ops
[1], v0
);
5361 create_fixed_operand (&ops
[2], v0
);
5365 create_input_operand (&ops
[1], v0
, tmode
);
5366 create_input_operand (&ops
[2], v1
, tmode
);
5369 if (maybe_expand_insn (icode
, 4, ops
))
5370 return ops
[0].value
;
5374 /* Generate instructions for vec_perm optab given its mode
5375 and three operands. */
5378 expand_vec_perm (machine_mode mode
, rtx v0
, rtx v1
, rtx sel
, rtx target
)
5380 enum insn_code icode
;
5381 machine_mode qimode
;
5382 unsigned int i
, w
, e
, u
;
5383 rtx tmp
, sel_qi
= NULL
;
5386 if (!target
|| GET_MODE (target
) != mode
)
5387 target
= gen_reg_rtx (mode
);
5389 w
= GET_MODE_SIZE (mode
);
5390 e
= GET_MODE_NUNITS (mode
);
5391 u
= GET_MODE_UNIT_SIZE (mode
);
5393 /* Set QIMODE to a different vector mode with byte elements.
5394 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5396 if (GET_MODE_INNER (mode
) != QImode
)
5398 qimode
= mode_for_vector (QImode
, w
);
5399 if (!VECTOR_MODE_P (qimode
))
5403 /* If the input is a constant, expand it specially. */
5404 gcc_assert (GET_MODE_CLASS (GET_MODE (sel
)) == MODE_VECTOR_INT
);
5405 if (GET_CODE (sel
) == CONST_VECTOR
)
5407 /* See if this can be handled with a vec_shr. We only do this if the
5408 second vector is all zeroes. */
5409 enum insn_code shift_code
= optab_handler (vec_shr_optab
, mode
);
5410 enum insn_code shift_code_qi
= ((qimode
!= VOIDmode
&& qimode
!= mode
)
5411 ? optab_handler (vec_shr_optab
, qimode
)
5412 : CODE_FOR_nothing
);
5413 rtx shift_amt
= NULL_RTX
;
5414 if (v1
== CONST0_RTX (GET_MODE (v1
))
5415 && (shift_code
!= CODE_FOR_nothing
5416 || shift_code_qi
!= CODE_FOR_nothing
))
5418 shift_amt
= shift_amt_for_vec_perm_mask (sel
);
5421 struct expand_operand ops
[3];
5422 if (shift_code
!= CODE_FOR_nothing
)
5424 create_output_operand (&ops
[0], target
, mode
);
5425 create_input_operand (&ops
[1], v0
, mode
);
5426 create_convert_operand_from_type (&ops
[2], shift_amt
,
5428 if (maybe_expand_insn (shift_code
, 3, ops
))
5429 return ops
[0].value
;
5431 if (shift_code_qi
!= CODE_FOR_nothing
)
5433 tmp
= gen_reg_rtx (qimode
);
5434 create_output_operand (&ops
[0], tmp
, qimode
);
5435 create_input_operand (&ops
[1], gen_lowpart (qimode
, v0
),
5437 create_convert_operand_from_type (&ops
[2], shift_amt
,
5439 if (maybe_expand_insn (shift_code_qi
, 3, ops
))
5440 return gen_lowpart (mode
, ops
[0].value
);
5445 icode
= direct_optab_handler (vec_perm_const_optab
, mode
);
5446 if (icode
!= CODE_FOR_nothing
)
5448 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
5453 /* Fall back to a constant byte-based permutation. */
5454 if (qimode
!= VOIDmode
)
5456 vec
= rtvec_alloc (w
);
5457 for (i
= 0; i
< e
; ++i
)
5459 unsigned int j
, this_e
;
5461 this_e
= INTVAL (CONST_VECTOR_ELT (sel
, i
));
5462 this_e
&= 2 * e
- 1;
5465 for (j
= 0; j
< u
; ++j
)
5466 RTVEC_ELT (vec
, i
* u
+ j
) = GEN_INT (this_e
+ j
);
5468 sel_qi
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5470 icode
= direct_optab_handler (vec_perm_const_optab
, qimode
);
5471 if (icode
!= CODE_FOR_nothing
)
5473 tmp
= mode
!= qimode
? gen_reg_rtx (qimode
) : target
;
5474 tmp
= expand_vec_perm_1 (icode
, tmp
, gen_lowpart (qimode
, v0
),
5475 gen_lowpart (qimode
, v1
), sel_qi
);
5477 return gen_lowpart (mode
, tmp
);
5482 /* Otherwise expand as a fully variable permuation. */
5483 icode
= direct_optab_handler (vec_perm_optab
, mode
);
5484 if (icode
!= CODE_FOR_nothing
)
5486 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
5491 /* As a special case to aid several targets, lower the element-based
5492 permutation to a byte-based permutation and try again. */
5493 if (qimode
== VOIDmode
)
5495 icode
= direct_optab_handler (vec_perm_optab
, qimode
);
5496 if (icode
== CODE_FOR_nothing
)
5501 /* Multiply each element by its byte size. */
5502 machine_mode selmode
= GET_MODE (sel
);
5504 sel
= expand_simple_binop (selmode
, PLUS
, sel
, sel
,
5505 NULL
, 0, OPTAB_DIRECT
);
5507 sel
= expand_simple_binop (selmode
, ASHIFT
, sel
,
5508 GEN_INT (exact_log2 (u
)),
5509 NULL
, 0, OPTAB_DIRECT
);
5510 gcc_assert (sel
!= NULL
);
5512 /* Broadcast the low byte each element into each of its bytes. */
5513 vec
= rtvec_alloc (w
);
5514 for (i
= 0; i
< w
; ++i
)
5516 int this_e
= i
/ u
* u
;
5517 if (BYTES_BIG_ENDIAN
)
5519 RTVEC_ELT (vec
, i
) = GEN_INT (this_e
);
5521 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5522 sel
= gen_lowpart (qimode
, sel
);
5523 sel
= expand_vec_perm (qimode
, sel
, sel
, tmp
, NULL
);
5524 gcc_assert (sel
!= NULL
);
5526 /* Add the byte offset to each byte element. */
5527 /* Note that the definition of the indicies here is memory ordering,
5528 so there should be no difference between big and little endian. */
5529 vec
= rtvec_alloc (w
);
5530 for (i
= 0; i
< w
; ++i
)
5531 RTVEC_ELT (vec
, i
) = GEN_INT (i
% u
);
5532 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5533 sel_qi
= expand_simple_binop (qimode
, PLUS
, sel
, tmp
,
5534 sel
, 0, OPTAB_DIRECT
);
5535 gcc_assert (sel_qi
!= NULL
);
5538 tmp
= mode
!= qimode
? gen_reg_rtx (qimode
) : target
;
5539 tmp
= expand_vec_perm_1 (icode
, tmp
, gen_lowpart (qimode
, v0
),
5540 gen_lowpart (qimode
, v1
), sel_qi
);
5542 tmp
= gen_lowpart (mode
, tmp
);
5546 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5550 expand_vec_cond_mask_expr (tree vec_cond_type
, tree op0
, tree op1
, tree op2
,
5553 struct expand_operand ops
[4];
5554 machine_mode mode
= TYPE_MODE (vec_cond_type
);
5555 machine_mode mask_mode
= TYPE_MODE (TREE_TYPE (op0
));
5556 enum insn_code icode
= get_vcond_mask_icode (mode
, mask_mode
);
5557 rtx mask
, rtx_op1
, rtx_op2
;
5559 if (icode
== CODE_FOR_nothing
)
5562 mask
= expand_normal (op0
);
5563 rtx_op1
= expand_normal (op1
);
5564 rtx_op2
= expand_normal (op2
);
5566 mask
= force_reg (mask_mode
, mask
);
5567 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5569 create_output_operand (&ops
[0], target
, mode
);
5570 create_input_operand (&ops
[1], rtx_op1
, mode
);
5571 create_input_operand (&ops
[2], rtx_op2
, mode
);
5572 create_input_operand (&ops
[3], mask
, mask_mode
);
5573 expand_insn (icode
, 4, ops
);
5575 return ops
[0].value
;
5578 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5582 expand_vec_cond_expr (tree vec_cond_type
, tree op0
, tree op1
, tree op2
,
5585 struct expand_operand ops
[6];
5586 enum insn_code icode
;
5587 rtx comparison
, rtx_op1
, rtx_op2
;
5588 machine_mode mode
= TYPE_MODE (vec_cond_type
);
5589 machine_mode cmp_op_mode
;
5592 enum tree_code tcode
;
5594 if (COMPARISON_CLASS_P (op0
))
5596 op0a
= TREE_OPERAND (op0
, 0);
5597 op0b
= TREE_OPERAND (op0
, 1);
5598 tcode
= TREE_CODE (op0
);
5602 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)));
5603 if (get_vcond_mask_icode (mode
, TYPE_MODE (TREE_TYPE (op0
)))
5604 != CODE_FOR_nothing
)
5605 return expand_vec_cond_mask_expr (vec_cond_type
, op0
, op1
,
5610 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0
)))
5611 == MODE_VECTOR_INT
);
5613 op0b
= build_zero_cst (TREE_TYPE (op0
));
5617 cmp_op_mode
= TYPE_MODE (TREE_TYPE (op0a
));
5618 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (op0a
));
5621 gcc_assert (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (cmp_op_mode
)
5622 && GET_MODE_NUNITS (mode
) == GET_MODE_NUNITS (cmp_op_mode
));
5624 icode
= get_vcond_icode (mode
, cmp_op_mode
, unsignedp
);
5625 if (icode
== CODE_FOR_nothing
)
5628 comparison
= vector_compare_rtx (tcode
, op0a
, op0b
, unsignedp
, icode
, 4);
5629 rtx_op1
= expand_normal (op1
);
5630 rtx_op2
= expand_normal (op2
);
5632 create_output_operand (&ops
[0], target
, mode
);
5633 create_input_operand (&ops
[1], rtx_op1
, mode
);
5634 create_input_operand (&ops
[2], rtx_op2
, mode
);
5635 create_fixed_operand (&ops
[3], comparison
);
5636 create_fixed_operand (&ops
[4], XEXP (comparison
, 0));
5637 create_fixed_operand (&ops
[5], XEXP (comparison
, 1));
5638 expand_insn (icode
, 6, ops
);
5639 return ops
[0].value
;
5642 /* Generate insns for a vector comparison into a mask. */
5645 expand_vec_cmp_expr (tree type
, tree exp
, rtx target
)
5647 struct expand_operand ops
[4];
5648 enum insn_code icode
;
5650 machine_mode mask_mode
= TYPE_MODE (type
);
5654 enum tree_code tcode
;
5656 op0a
= TREE_OPERAND (exp
, 0);
5657 op0b
= TREE_OPERAND (exp
, 1);
5658 tcode
= TREE_CODE (exp
);
5660 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (op0a
));
5661 vmode
= TYPE_MODE (TREE_TYPE (op0a
));
5663 icode
= get_vec_cmp_icode (vmode
, mask_mode
, unsignedp
);
5664 if (icode
== CODE_FOR_nothing
)
5667 comparison
= vector_compare_rtx (tcode
, op0a
, op0b
, unsignedp
, icode
, 2);
5668 create_output_operand (&ops
[0], target
, mask_mode
);
5669 create_fixed_operand (&ops
[1], comparison
);
5670 create_fixed_operand (&ops
[2], XEXP (comparison
, 0));
5671 create_fixed_operand (&ops
[3], XEXP (comparison
, 1));
5672 expand_insn (icode
, 4, ops
);
5673 return ops
[0].value
;
5676 /* Expand a highpart multiply. */
5679 expand_mult_highpart (machine_mode mode
, rtx op0
, rtx op1
,
5680 rtx target
, bool uns_p
)
5682 struct expand_operand eops
[3];
5683 enum insn_code icode
;
5684 int method
, i
, nunits
;
5690 method
= can_mult_highpart_p (mode
, uns_p
);
5696 tab1
= uns_p
? umul_highpart_optab
: smul_highpart_optab
;
5697 return expand_binop (mode
, tab1
, op0
, op1
, target
, uns_p
,
5700 tab1
= uns_p
? vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
5701 tab2
= uns_p
? vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
5704 tab1
= uns_p
? vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
5705 tab2
= uns_p
? vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
5706 if (BYTES_BIG_ENDIAN
)
5707 std::swap (tab1
, tab2
);
5713 icode
= optab_handler (tab1
, mode
);
5714 nunits
= GET_MODE_NUNITS (mode
);
5715 wmode
= insn_data
[icode
].operand
[0].mode
;
5716 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode
) == nunits
);
5717 gcc_checking_assert (GET_MODE_SIZE (wmode
) == GET_MODE_SIZE (mode
));
5719 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
5720 create_input_operand (&eops
[1], op0
, mode
);
5721 create_input_operand (&eops
[2], op1
, mode
);
5722 expand_insn (icode
, 3, eops
);
5723 m1
= gen_lowpart (mode
, eops
[0].value
);
5725 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
5726 create_input_operand (&eops
[1], op0
, mode
);
5727 create_input_operand (&eops
[2], op1
, mode
);
5728 expand_insn (optab_handler (tab2
, mode
), 3, eops
);
5729 m2
= gen_lowpart (mode
, eops
[0].value
);
5731 v
= rtvec_alloc (nunits
);
5734 for (i
= 0; i
< nunits
; ++i
)
5735 RTVEC_ELT (v
, i
) = GEN_INT (!BYTES_BIG_ENDIAN
+ (i
& ~1)
5736 + ((i
& 1) ? nunits
: 0));
5740 for (i
= 0; i
< nunits
; ++i
)
5741 RTVEC_ELT (v
, i
) = GEN_INT (2 * i
+ (BYTES_BIG_ENDIAN
? 0 : 1));
5743 perm
= gen_rtx_CONST_VECTOR (mode
, v
);
5745 return expand_vec_perm (mode
, m1
, m2
, perm
, target
);
5748 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5752 find_cc_set (rtx x
, const_rtx pat
, void *data
)
5754 if (REG_P (x
) && GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
5755 && GET_CODE (pat
) == SET
)
5757 rtx
*p_cc_reg
= (rtx
*) data
;
5758 gcc_assert (!*p_cc_reg
);
5763 /* This is a helper function for the other atomic operations. This function
5764 emits a loop that contains SEQ that iterates until a compare-and-swap
5765 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5766 a set of instructions that takes a value from OLD_REG as an input and
5767 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5768 set to the current contents of MEM. After SEQ, a compare-and-swap will
5769 attempt to update MEM with NEW_REG. The function returns true when the
5770 loop was generated successfully. */
5773 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5775 machine_mode mode
= GET_MODE (mem
);
5776 rtx_code_label
*label
;
5777 rtx cmp_reg
, success
, oldval
;
5779 /* The loop we want to generate looks like
5785 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5789 Note that we only do the plain load from memory once. Subsequent
5790 iterations use the value loaded by the compare-and-swap pattern. */
5792 label
= gen_label_rtx ();
5793 cmp_reg
= gen_reg_rtx (mode
);
5795 emit_move_insn (cmp_reg
, mem
);
5797 emit_move_insn (old_reg
, cmp_reg
);
5803 if (!expand_atomic_compare_and_swap (&success
, &oldval
, mem
, old_reg
,
5804 new_reg
, false, MEMMODEL_SYNC_SEQ_CST
,
5808 if (oldval
!= cmp_reg
)
5809 emit_move_insn (cmp_reg
, oldval
);
5811 /* Mark this jump predicted not taken. */
5812 emit_cmp_and_jump_insns (success
, const0_rtx
, EQ
, const0_rtx
,
5813 GET_MODE (success
), 1, label
, 0);
5818 /* This function tries to emit an atomic_exchange intruction. VAL is written
5819 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5820 using TARGET if possible. */
5823 maybe_emit_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
5825 machine_mode mode
= GET_MODE (mem
);
5826 enum insn_code icode
;
5828 /* If the target supports the exchange directly, great. */
5829 icode
= direct_optab_handler (atomic_exchange_optab
, mode
);
5830 if (icode
!= CODE_FOR_nothing
)
5832 struct expand_operand ops
[4];
5834 create_output_operand (&ops
[0], target
, mode
);
5835 create_fixed_operand (&ops
[1], mem
);
5836 create_input_operand (&ops
[2], val
, mode
);
5837 create_integer_operand (&ops
[3], model
);
5838 if (maybe_expand_insn (icode
, 4, ops
))
5839 return ops
[0].value
;
5845 /* This function tries to implement an atomic exchange operation using
5846 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5847 The previous contents of *MEM are returned, using TARGET if possible.
5848 Since this instructionn is an acquire barrier only, stronger memory
5849 models may require additional barriers to be emitted. */
5852 maybe_emit_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
,
5853 enum memmodel model
)
5855 machine_mode mode
= GET_MODE (mem
);
5856 enum insn_code icode
;
5857 rtx_insn
*last_insn
= get_last_insn ();
5859 icode
= optab_handler (sync_lock_test_and_set_optab
, mode
);
5861 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5862 exists, and the memory model is stronger than acquire, add a release
5863 barrier before the instruction. */
5865 if (is_mm_seq_cst (model
) || is_mm_release (model
) || is_mm_acq_rel (model
))
5866 expand_mem_thread_fence (model
);
5868 if (icode
!= CODE_FOR_nothing
)
5870 struct expand_operand ops
[3];
5871 create_output_operand (&ops
[0], target
, mode
);
5872 create_fixed_operand (&ops
[1], mem
);
5873 create_input_operand (&ops
[2], val
, mode
);
5874 if (maybe_expand_insn (icode
, 3, ops
))
5875 return ops
[0].value
;
5878 /* If an external test-and-set libcall is provided, use that instead of
5879 any external compare-and-swap that we might get from the compare-and-
5880 swap-loop expansion later. */
5881 if (!can_compare_and_swap_p (mode
, false))
5883 rtx libfunc
= optab_libfunc (sync_lock_test_and_set_optab
, mode
);
5884 if (libfunc
!= NULL
)
5888 addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
5889 return emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
5890 mode
, 2, addr
, ptr_mode
,
5895 /* If the test_and_set can't be emitted, eliminate any barrier that might
5896 have been emitted. */
5897 delete_insns_since (last_insn
);
5901 /* This function tries to implement an atomic exchange operation using a
5902 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5903 *MEM are returned, using TARGET if possible. No memory model is required
5904 since a compare_and_swap loop is seq-cst. */
5907 maybe_emit_compare_and_swap_exchange_loop (rtx target
, rtx mem
, rtx val
)
5909 machine_mode mode
= GET_MODE (mem
);
5911 if (can_compare_and_swap_p (mode
, true))
5913 if (!target
|| !register_operand (target
, mode
))
5914 target
= gen_reg_rtx (mode
);
5915 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
5922 /* This function tries to implement an atomic test-and-set operation
5923 using the atomic_test_and_set instruction pattern. A boolean value
5924 is returned from the operation, using TARGET if possible. */
5927 maybe_emit_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
5929 machine_mode pat_bool_mode
;
5930 struct expand_operand ops
[3];
5932 if (!targetm
.have_atomic_test_and_set ())
5935 /* While we always get QImode from __atomic_test_and_set, we get
5936 other memory modes from __sync_lock_test_and_set. Note that we
5937 use no endian adjustment here. This matches the 4.6 behavior
5938 in the Sparc backend. */
5939 enum insn_code icode
= targetm
.code_for_atomic_test_and_set
;
5940 gcc_checking_assert (insn_data
[icode
].operand
[1].mode
== QImode
);
5941 if (GET_MODE (mem
) != QImode
)
5942 mem
= adjust_address_nv (mem
, QImode
, 0);
5944 pat_bool_mode
= insn_data
[icode
].operand
[0].mode
;
5945 create_output_operand (&ops
[0], target
, pat_bool_mode
);
5946 create_fixed_operand (&ops
[1], mem
);
5947 create_integer_operand (&ops
[2], model
);
5949 if (maybe_expand_insn (icode
, 3, ops
))
5950 return ops
[0].value
;
5954 /* This function expands the legacy _sync_lock test_and_set operation which is
5955 generally an atomic exchange. Some limited targets only allow the
5956 constant 1 to be stored. This is an ACQUIRE operation.
5958 TARGET is an optional place to stick the return value.
5959 MEM is where VAL is stored. */
5962 expand_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
)
5966 /* Try an atomic_exchange first. */
5967 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, MEMMODEL_SYNC_ACQUIRE
);
5971 ret
= maybe_emit_sync_lock_test_and_set (target
, mem
, val
,
5972 MEMMODEL_SYNC_ACQUIRE
);
5976 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
5980 /* If there are no other options, try atomic_test_and_set if the value
5981 being stored is 1. */
5982 if (val
== const1_rtx
)
5983 ret
= maybe_emit_atomic_test_and_set (target
, mem
, MEMMODEL_SYNC_ACQUIRE
);
5988 /* This function expands the atomic test_and_set operation:
5989 atomically store a boolean TRUE into MEM and return the previous value.
5991 MEMMODEL is the memory model variant to use.
5992 TARGET is an optional place to stick the return value. */
5995 expand_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
5997 machine_mode mode
= GET_MODE (mem
);
5998 rtx ret
, trueval
, subtarget
;
6000 ret
= maybe_emit_atomic_test_and_set (target
, mem
, model
);
6004 /* Be binary compatible with non-default settings of trueval, and different
6005 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6006 another only has atomic-exchange. */
6007 if (targetm
.atomic_test_and_set_trueval
== 1)
6009 trueval
= const1_rtx
;
6010 subtarget
= target
? target
: gen_reg_rtx (mode
);
6014 trueval
= gen_int_mode (targetm
.atomic_test_and_set_trueval
, mode
);
6015 subtarget
= gen_reg_rtx (mode
);
6018 /* Try the atomic-exchange optab... */
6019 ret
= maybe_emit_atomic_exchange (subtarget
, mem
, trueval
, model
);
6021 /* ... then an atomic-compare-and-swap loop ... */
6023 ret
= maybe_emit_compare_and_swap_exchange_loop (subtarget
, mem
, trueval
);
6025 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6027 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, trueval
, model
);
6029 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6030 things with the value 1. Thus we try again without trueval. */
6031 if (!ret
&& targetm
.atomic_test_and_set_trueval
!= 1)
6032 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, const1_rtx
, model
);
6034 /* Failing all else, assume a single threaded environment and simply
6035 perform the operation. */
6038 /* If the result is ignored skip the move to target. */
6039 if (subtarget
!= const0_rtx
)
6040 emit_move_insn (subtarget
, mem
);
6042 emit_move_insn (mem
, trueval
);
6046 /* Recall that have to return a boolean value; rectify if trueval
6047 is not exactly one. */
6048 if (targetm
.atomic_test_and_set_trueval
!= 1)
6049 ret
= emit_store_flag_force (target
, NE
, ret
, const0_rtx
, mode
, 0, 1);
6054 /* This function expands the atomic exchange operation:
6055 atomically store VAL in MEM and return the previous value in MEM.
6057 MEMMODEL is the memory model variant to use.
6058 TARGET is an optional place to stick the return value. */
6061 expand_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
6065 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, model
);
6067 /* Next try a compare-and-swap loop for the exchange. */
6069 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
6074 /* This function expands the atomic compare exchange operation:
6076 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6077 *PTARGET_OVAL is an optional place to store the old value from memory.
6078 Both target parameters may be NULL or const0_rtx to indicate that we do
6079 not care about that return value. Both target parameters are updated on
6080 success to the actual location of the corresponding result.
6082 MEMMODEL is the memory model variant to use.
   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
                                rtx mem, rtx expected, rtx desired,
                                bool is_weak, enum memmodel succ_model,
                                enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
        ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
          || (target_bool = *ptarget_bool) == NULL
          || GET_MODE (target_bool) != bool_mode)
        target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
        {
          /* Return success/failure.  */
          target_bool = ops[0].value;
          target_oval = ops[1].value;
          goto success;
        }
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
        return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
         skip the computation of it.  */
      if (ptarget_bool == NULL)
        goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
        note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
        {
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
                                               const0_rtx, VOIDmode, 0, 1);
          goto success;
        }
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, 3, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;

  return true;
}
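
#if 0
/* Usage sketch (illustrative only, not part of this file; the example_*
   helper is hypothetical): expanding a strong seq-cst compare-and-swap.
   MEM, EXPECTED and DESIRED are caller-provided rtxes of the same mode;
   on success the actual result locations are left in *BOOL_RET and
   *OLDVAL.  */

static bool
example_expand_cas (rtx mem, rtx expected, rtx desired,
                    rtx *bool_ret, rtx *oldval)
{
  *bool_ret = NULL_RTX;
  *oldval = NULL_RTX;
  return expand_atomic_compare_and_swap (bool_ret, oldval, mem,
                                         expected, desired,
                                         /*is_weak=*/false,
                                         MEMMODEL_SEQ_CST,
                                         MEMMODEL_SEQ_CST);
}
#endif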

/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}

/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (targetm.have_mem_thread_fence ())
    emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      if (targetm.have_memory_barrier ())
        emit_insn (targetm.gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
        emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
        expand_asm_memory_barrier ();
    }
}
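
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   emitting the fence for __atomic_thread_fence (__ATOMIC_RELEASE).  A
   relaxed model emits nothing; otherwise the target pattern, a libcall,
   or a compiler-only asm barrier is used, in that order.  */

static void
example_emit_release_fence (void)
{
  expand_mem_thread_fence (MEMMODEL_RELEASE);
}
#endif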

/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  if (targetm.have_mem_signal_fence ())
    emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
         handler running on the same thread.  Thus this really becomes a
         compiler barrier, in that stores must not be sunk past
         (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}

/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* Issue val = compare_and_swap (mem, 0, 0).
         This may cause the occasional harmless store of 0 when the value is
         already 0, but it seems to be OK according to the standards guys.  */
      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
                                          const0_rtx, false, model, model))
        return target;

      /* Otherwise there is no atomic load, leave the library call.  */
      return NULL_RTX;
    }

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
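
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   expanding a seq-cst atomic load from MEM.  A NULL return means no inline
   sequence could be generated and the caller should leave the library
   call in place.  */

static rtx
example_expand_seq_cst_load (rtx mem)
{
  return expand_atomic_load (NULL_RTX, mem, MEMMODEL_SEQ_CST);
}
#endif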

/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   This function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  Try a mem_exchange and throw away
     the result.  If that doesn't work, don't do anything.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
      if (!target)
        target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
                                                            val);
      if (target)
        return const0_rtx;
      else
        return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
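
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   expanding a release-mode atomic store of VAL into MEM.  USE_RELEASE is
   false here because a generic __atomic store must not be rewritten as
   __sync_lock_release.  A NULL return means the caller must fall back to
   a library call.  */

static rtx
example_expand_release_store (rtx mem, rtx val)
{
  return expand_atomic_store (mem, val, MEMMODEL_RELEASE, false);
}
#endif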

/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};

/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
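
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   querying the table for a fetch-add.  The reverse code records that the
   old value can be recovered from the new value with MINUS, which the
   fallback paths below exploit.  */

static void
example_query_add_optabs (void)
{
  struct atomic_op_functions ops;

  get_atomic_op_for_code (&ops, PLUS);
  gcc_assert (ops.mem_fetch_before == atomic_fetch_add_optab);
  gcc_assert (ops.reverse_code == MINUS);
}
#endif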

/* See if there is a more optimal way to implement the operation
   "*MEM CODE VAL" using memory order MODEL.  If AFTER is true the operation
   needs to return the value of *MEM after the operation, otherwise the
   previous value.
   TARGET is an optional place to place the result.  The result is unused
   if it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
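
#if 0
/* Illustration (not part of this file; source-language level, using GCC's
   __atomic builtins): the identity behind the AND rewrite above.  Both
   calls leave zero in *X and return the previous value, so the cheaper
   native exchange can stand in for the and form with this constant; the
   IOR/-1 case is symmetric.  */

static int
example_fetch_and_zero (int *x)
{
  /* Same observable effect as __atomic_fetch_and (x, 0, ...).  */
  return __atomic_exchange_n (x, 0, __ATOMIC_SEQ_CST);
}
#endif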

/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL, and store the
   result back to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
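
/* A worked illustration of the compensation identities used above (added
   commentary, not original GCC text): if only the opposite variation could
   be emitted, the desired value is recomputed from it.

     op_fetch result = fetch_op result OP val          (new = old OP val)
     fetch_op result = op_fetch result REVERSE_OP val  (e.g. old = new - val
                                                        when OP is PLUS)

   NAND has no single reverse code, which is why the NOT case above
   recomputes new = ~(old & val) with an explicit AND/NOT pair.  */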

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL, and store the
   result back to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          rtx_insn *insns = get_insns ();
          end_sequence ();
          emit_insn (insns);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            2, addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
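
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   expanding a seq-cst __atomic_fetch_add.  TARGET may be NULL_RTX (let the
   expander pick a register), const0_rtx (result unused) or a preferred
   destination.  */

static rtx
example_expand_fetch_add (rtx target, rtx mem, rtx val)
{
  return expand_atomic_fetch_op (target, mem, val, PLUS,
                                 MEMMODEL_SEQ_CST, /*after=*/false);
}
#endif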

/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}

/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}

/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          rtx_insn *last;
          machine_mode mode;

          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}

/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}

/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}

/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}

/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
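
#if 0
/* Usage sketch (illustrative only; the example_* helper is hypothetical):
   the typical pattern for driving maybe_expand_insn, as used by the atomic
   expanders above.  ICODE is assumed to take one output and two input
   operands, all in MODE.  */

static rtx
example_expand_binary (enum insn_code icode, machine_mode mode,
                       rtx target, rtx op0, rtx op1)
{
  struct expand_operand ops[3];

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], op0, mode);
  create_input_operand (&ops[2], op1, mode);
  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
#endif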

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}