/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
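
/* Illustrative note (added for exposition; not part of the original
   source).  When, say, a DImode addition is expanded into several
   word_mode insns, the caller hands the whole sequence to
   add_equal_note so that the final insn that sets TARGET carries

       REG_EQUAL (plus:DI (reg:DI op0) (reg:DI op1))

   which lets later passes such as CSE treat the multi-insn sequence
   as one plain addition.  */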
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
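
/* Example (added for exposition): widened_mode (SImode, <CONST_INT>,
   <HImode reg>) returns HImode, because the constant's mode is VOIDmode
   and the register's mode is the only useful from_mode.  If both
   operands are VOIDmode constants, TO_MODE itself is returned.  */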
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or
     paradoxical SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */
  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
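
/* Example (added for exposition): when an SImode AND is carried out in
   DImode, NO_EXTEND can be nonzero, because the high half of the DImode
   result is discarded afterwards; a paradoxical lowpart SUBREG with
   undefined upper bits is enough.  For the first operand of a right
   shift the upper bits flow into the result, so a genuine zero- or
   sign-extension must be emitted instead.  */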
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops  OP0     OP1     WIDE_OP
   widening-sum                  2     oprnd0  -       oprnd1
   widening-dot-product          3     oprnd0  oprnd1  oprnd2
   widening-mult                 2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1     oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
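
/* Example (added for exposition): for a widening multiply-accumulate
   such as WIDEN_MULT_PLUS_EXPR, OP0 and OP1 are the narrow factors and
   WIDE_OP is the wide addend, so four expand operands are created: the
   output, the two converted narrow inputs in TMODE0/TMODE1, and the
   wide input in WMODE.  For a plain vec-unpack only the output and OP0
   are used.  */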
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
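
/* Example (added for exposition; a sketch, not a quotation of an
   actual caller): a fused multiply-add a * b + c could be emitted as

       rtx r = expand_ternary_op (SFmode, fma_optab, a, b, c, target, 0);

   provided the caller has already checked that
   optab_handler (fma_optab, SFmode) != CODE_FOR_nothing, since this
   routine asserts rather than falling back.  */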
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
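
/* Example (added for exposition): broadcasting a constant, e.g.
   expand_vector_broadcast (V4SImode, GEN_INT (5)), emits no insns and
   yields the CONST_VECTOR [5, 5, 5, 5]; broadcasting a register value
   goes through the target's vec_init pattern and returns NULL when the
   target lacks one.  */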
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
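
/* Worked example (added for exposition): a DImode left shift by a known
   subword count N (0 < N < 32) on a 32-bit little-endian target reduces
   to

       carries      = outof_input >> (32 - N);    (reverse shift)
       into_target  = (into_input << N) | carries;
       outof_target = outof_input << N;

   where OUTOF is the low word and INTO the high word.  For variable
   counts the (32 - N) value is formed as ~N or 31 - N combined with a
   one-bit pre-shift, to avoid ever shifting by exactly 32.  */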
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;

      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;

      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   NULL_RTX, NULL, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
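
/* Summary example (added for exposition): in plain C terms, for x << n
   with word-sized halves lo/hi and 0 <= n < 64 this routine selects
   between

       if (n < 32) { hi = (hi << n) | (lo >> (32 - n)); lo <<= n; }
       else        { hi = lo << (n - 32);               lo = 0;   }

   resolving the comparison at compile time when possible, otherwise
   with conditional moves, and as a last resort with a branch.  (The
   n == 0 corner is handled by the subroutines above.)  */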
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	 *		      [__op1_high_|__op1_low__]
	 _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
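
/* Editor's sketch (added for exposition; standard C, not GCC
   internals): with 32-bit words the decomposition above computes the
   low 64 product bits as

       uint64_t
       mul_low64 (uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi)
       {
	 uint32_t adjust = a_hi * b_lo + a_lo * b_hi;  /+ (2a) + (2b) +/
	 uint64_t product = (uint64_t) a_lo * b_lo;    /+ (1), widening +/
	 return product + ((uint64_t) adjust << 32);
       }

   Partial product (3) never reaches the low two words, so it is never
   computed.  */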
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */

static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
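
/* Example (added for exposition): on a target where materializing a
   wide immediate takes several insns, rtx_cost of the CONST_INT as an
   operand exceeds set_src_cost of loading it once, so the constant is
   forced into a pseudo and reused instead of being rematerialized
   inside each operation.  */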
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */

static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
						      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();
1147 if (methods
!= OPTAB_MUST_WIDEN
1148 && find_widening_optab_handler (binoptab
, mode
,
1149 widened_mode (mode
, op0
, op1
), 1)
1150 != CODE_FOR_nothing
)
1152 temp
= expand_binop_directly (mode
, binoptab
, op0
, op1
, target
,
1153 unsignedp
, methods
, last
);
1158 /* If we were trying to rotate, and that didn't work, try rotating
1159 the other direction before falling back to shifts and bitwise-or. */
1160 if (((binoptab
== rotl_optab
1161 && optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
1162 || (binoptab
== rotr_optab
1163 && optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
))
1164 && mclass
== MODE_INT
)
1166 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1168 unsigned int bits
= GET_MODE_PRECISION (mode
);
1170 if (CONST_INT_P (op1
))
1171 newop1
= GEN_INT (bits
- INTVAL (op1
));
1172 else if (targetm
.shift_truncation_mask (mode
) == bits
- 1)
1173 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1175 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1176 gen_int_mode (bits
, GET_MODE (op1
)), op1
,
1177 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1179 temp
= expand_binop_directly (mode
, otheroptab
, op0
, newop1
,
1180 target
, unsignedp
, methods
, last
);
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
					     : smul_widen_optab),
				  GET_MODE_2XWIDER_MODE (mode), mode)
	  != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  machine_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (inner_mode)
		  < GET_MODE_BITSIZE (GET_MODE (op1))))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 GET_MODE_WIDER_MODE (wider_mode),
						 mode, 0)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }
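
  /* Editor's sketch (added for exposition; plain C model of the loop
     above): a TImode IOR on a 64-bit target becomes

	 target[0] = op0[0] | op1[0];
	 target[1] = op0[1] | op1[1];

     one word_mode operation per word, with no carries to propagate.  */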
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }
1444 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1445 && mclass
== MODE_INT
1446 && CONST_INT_P (op1
)
1447 && GET_MODE_PRECISION (mode
) == 2 * BITS_PER_WORD
1448 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1449 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1452 rtx into_target
, outof_target
;
1453 rtx into_input
, outof_input
;
1455 int shift_count
, left_shift
, outof_word
;
1457 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1458 won't be accurate, so use a new target. Do this also if target is not
1459 a REG, first because having a register instead may open optimization
1460 opportunities, and second because if target and op0 happen to be MEMs
1461 designating the same location, we would risk clobbering it too early
1462 in the code sequence we generate below. */
1467 || !valid_multiword_target_p (target
))
1468 target
= gen_reg_rtx (mode
);
1472 shift_count
= INTVAL (op1
);
1474 /* OUTOF_* is the word we are shifting bits away from, and
1475 INTO_* is the word that we are shifting bits towards, thus
1476 they differ depending on the direction of the shift and
1477 WORDS_BIG_ENDIAN. */
1479 left_shift
= (binoptab
== rotl_optab
);
1480 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1482 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1483 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1485 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1486 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1488 if (shift_count
== BITS_PER_WORD
)
1490 /* This is just a word swap. */
1491 emit_move_insn (outof_target
, into_input
);
1492 emit_move_insn (into_target
, outof_input
);
1497 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1498 rtx first_shift_count
, second_shift_count
;
1499 optab reverse_unsigned_shift
, unsigned_shift
;
1501 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1502 ? lshr_optab
: ashl_optab
);
1504 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1505 ? ashl_optab
: lshr_optab
);
1507 if (shift_count
> BITS_PER_WORD
)
1509 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1510 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1514 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1515 second_shift_count
= GEN_INT (shift_count
);
1518 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1519 outof_input
, first_shift_count
,
1520 NULL_RTX
, unsignedp
, next_methods
);
1521 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1522 into_input
, second_shift_count
,
1523 NULL_RTX
, unsignedp
, next_methods
);
1525 if (into_temp1
!= 0 && into_temp2
!= 0)
1526 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1527 into_target
, unsignedp
, next_methods
);
1531 if (inter
!= 0 && inter
!= into_target
)
1532 emit_move_insn (into_target
, inter
);
1534 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1535 into_input
, first_shift_count
,
1536 NULL_RTX
, unsignedp
, next_methods
);
1537 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1538 outof_input
, second_shift_count
,
1539 NULL_RTX
, unsignedp
, next_methods
);
1541 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1542 inter
= expand_binop (word_mode
, ior_optab
,
1543 outof_temp1
, outof_temp2
,
1544 outof_target
, unsignedp
, next_methods
);
1546 if (inter
!= 0 && inter
!= outof_target
)
1547 emit_move_insn (outof_target
, inter
);
1550 insns
= get_insns ();
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx_insn *temp = emit_move_insn (target, xtarget);

	      set_dst_reg_note (temp, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						mode, copy_rtx (xop0),
						copy_rtx (xop1)),
				target);
	    }
	  else
	    target = xtarget;

	  return target;
	}

      else
	delete_insns_since (last);
    }
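
  /* Editor's sketch (added for exposition; plain C model of the carry
     loop above, for addition with normalizep == 1):

	 carry_in = 0;
	 for (i = 0; i < nwords; i++)   least significant word first
	   {
	     x = op0[i] + op1[i];
	     carry_out = x < op0[i];    the LT store-flag
	     newx = x + carry_in;
	     carry_out |= newx < x;
	     target[i] = newx;
	     carry_in = carry_out;
	   }

     Subtraction uses GT for the borrow test and the opposite optab to
     apply the carry.  */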
);
1682 /* Attempt to synthesize double word multiplies using a sequence of word
1683 mode multiplications. We first attempt to generate a sequence using a
1684 more efficient unsigned widening multiply, and if that fails we then
1685 try using a signed widening multiply. */
1687 if (binoptab
== smul_optab
1688 && mclass
== MODE_INT
1689 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1690 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
1691 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
1693 rtx product
= NULL_RTX
;
1694 if (widening_optab_handler (umul_widen_optab
, mode
, word_mode
)
1695 != CODE_FOR_nothing
)
1697 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1700 delete_insns_since (last
);
1703 if (product
== NULL_RTX
1704 && widening_optab_handler (smul_widen_optab
, mode
, word_mode
)
1705 != CODE_FOR_nothing
)
1707 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1710 delete_insns_since (last
);
1713 if (product
!= NULL_RTX
)
1715 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
)
1717 rtx_insn
*move
= emit_move_insn (target
? target
: product
,
1719 set_dst_reg_note (move
,
1721 gen_rtx_fmt_ee (MULT
, mode
,
1724 target
? target
: product
);
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
		  != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG1 and TARG2.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
        return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
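/* For illustration: suppose a hypothetical SImode libcall whose DImode
   return value packs both results, the first in the low half (ignoring
   endianness details).  In C terms:

     unsigned long long v = lib (x, y);            // assumed packing
     unsigned first  = (unsigned) v;               // offset 0
     unsigned second = (unsigned) (v >> 32);       // offset GET_MODE_SIZE

   The simplify_gen_subreg call below selects one half in the same way.  */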
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
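/* For illustration: the identity above in plain C, computing a 16-bit
   clz with a 32-bit primitive, assuming a hypothetical clz32 that is
   defined for nonzero inputs:

     int clz16 (unsigned short x)
     {
       return clz32 ((unsigned) x) - (32 - 16);
     }

   The zero-extended operand gains 16 extra leading zeros, which the
   subtraction removes again.  */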
static rtx
widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode,
                                    unoptab != clrsb_optab, false);
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unoptab != clrsb_optab);
              if (temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
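/* For illustration: the same strategy in C for a 64-bit clz built from a
   hypothetical 32-bit clz32, mirroring the branch emitted below:

     int clz64 (unsigned long long x)
     {
       unsigned hi = (unsigned) (x >> 32), lo = (unsigned) x;
       return hi != 0 ? clz32 (hi) : 32 + clz32 (lo);
     }
*/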
static rtx
expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
                       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */
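/* For illustration, in C, with a hypothetical word-sized popcount32:

     int popcount64 (unsigned long long x)
     {
       return popcount32 ((unsigned) x) + popcount32 ((unsigned) (x >> 32));
     }
*/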
static rtx
expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 0, mode), NULL_RTX,
                           true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 1, mode), NULL_RTX,
                           true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, 0);
  emit_insn (seq);
  return t;
}
/* Try calculating
        (parity:wide x)
   as
        (parity:narrow (low (x) ^ high (x)))  */
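/* For illustration: xor folds the two halves because parity distributes
   over xor.  In C, with a hypothetical word-sized parity32:

     int parity64 (unsigned long long x)
     {
       return parity32 ((unsigned) x ^ (unsigned) (x >> 32));
     }
*/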
static rtx
expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
                        operand_subword_force (op0, 0, mode),
                        operand_subword_force (op0, 1, mode),
                        NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
/* Try calculating
        (bswap:narrow x)
   as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
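/* For illustration: byte-swapping a narrow value inside a wider register
   leaves the interesting bytes at the top, so a logical right shift
   finishes the job.  In C, for 16 bits via a hypothetical bswap32:

     unsigned short bswap16 (unsigned short x)
     {
       return (unsigned short) (bswap32 ((unsigned) x) >> (32 - 16));
     }
*/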
static rtx
widen_bswap (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx x;
  rtx_insn *last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
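/* For illustration: parity is popcount modulo 2, so masking the low bit
   of the popcount suffices.  A C sketch using GCC's own builtin:

     int parity32 (unsigned x)
     {
       return __builtin_popcount (x) & 1;
     }
*/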
static rtx
expand_parity (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0 || GET_MODE (target) != wider_mode)
                target = gen_reg_rtx (wider_mode);

              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (mclass != MODE_INT
                      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                    return convert_to_mode (mode, temp, 0);
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
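/* For illustration: 'x & -x' isolates the lowest set bit, so for nonzero
   32-bit x the transformation reads, in C:

     int ctz32 (unsigned x)
     {
       return 31 - __builtin_clz (x & -x);   // K - clz (x & -x), K = 31
     }

   On hardware whose clz yields the operand width (32) at zero, the
   sequence produces 31 - 32 = -1 at x == 0, the property expand_ffs
   relies on.  */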
static rtx
expand_ctz (machine_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_ctz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
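/* For illustration: the overall shape of the fallback in C, assuming the
   hardware produces some value for ctz at zero, as the emitted RTL
   sequence (which also runs the operation before the test) assumes:

     int ffs32 (unsigned x)
     {
       int t = __builtin_ctz (x);   // some value at 0 on such hardware
       if (x == 0)
         t = -1;                    // the test-and-branch correction
       return t + 1;                // ffs: 0 at 0, else ctz + 1
     }
*/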
static rtx
expand_ffs (machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
                           machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
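/* For illustration: for IEEE single precision the sign is the top bit of
   the 32-bit image, so abs is one AND and negation one XOR on the
   integer view.  A C sketch (needs <string.h>):

     float fabsf_bits (float x)
     {
       unsigned u;
       memcpy (&u, &x, sizeof u);   // the gen_lowpart analogue
       u &= 0x7fffffffu;            // ABS: clear sign bit; NEG would xor it
       memcpy (&x, &u, sizeof x);
       return x;
     }
*/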
static rtx
expand_absneg_bit (enum rtx_code code, machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_wide_int_const (mask, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_wide_int_const (mask, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
                        target);
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
                    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, ops[0].value,
                                   optab_to_code (unoptab),
                                   ops[1].value, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return ops[0].value;
        }
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_clz (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (mode, op0, target);
      if (temp)
        return temp;
    }

  if (unoptab == parity_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
          || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
         or ROTATERT.  First try these directly; if this fails, then try the
         obvious pair of shifts with allowed widening, as this will probably
         be always more efficient than the other fallback methods.  */
      if (mode == HImode)
        {
          rtx_insn *last;
          rtx temp1, temp2;

          if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          last = get_last_insn ();

          temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          if (temp1 && temp2)
            {
              temp = expand_binop (mode, ior_optab, temp1, temp2, target,
                                   unsignedp, OPTAB_WIDEN);
              if (temp)
                return temp;
            }

          delete_insns_since (last);
        }

      temp = widen_bswap (mode, op0, target);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_bswap (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
          {
            rtx xop0 = op0;
            rtx_insn *last = get_last_insn ();

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && mclass == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == clrsb_optab || unoptab == popcount_optab
          || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
                                          optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
        eq_value = NULL_RTX;
      else
        {
          eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
          if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
          else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (ZERO_EXTEND,
                                           outmode, eq_value, mode);
        }
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
              || optab_libfunc (unoptab, wider_mode))
            {
              rtx xop0 = op0;
              rtx_insn *last = get_last_insn ();

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab
                                     || unoptab == bswap_optab)
                                    && mclass == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  Similarly for clrsb.  */
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
                  && temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);

              /* Likewise for bswap.  */
              if (unoptab == bswap_optab && temp != 0)
                {
                  gcc_assert (GET_MODE_PRECISION (wider_mode)
                              == GET_MODE_BITSIZE (wider_mode)
                              && GET_MODE_PRECISION (mode)
                                 == GET_MODE_BITSIZE (mode));

                  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
                                       GET_MODE_BITSIZE (wider_mode)
                                       - GET_MODE_BITSIZE (mode),
                                       NULL_RTX, true);
                }

              if (temp)
                {
                  if (mclass != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                          op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
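  /* For illustration: the identity above in C for 32-bit int, where the
     arithmetic shift replicates the sign bit across the word:

       int abs32 (int x)
       {
         int m = x >> 31;      // 0 for x >= 0, -1 for x < 0
         return (m ^ x) - m;   // ((x >> W-1) ^ x) - (x >> W-1)
       }
  */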
  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL, op1,
                           profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */
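  /* For illustration: the same mask trick as ordinary abs, minus the
     final subtraction, gives the one's complement form in C:

       int abs1c32 (int x)
       {
         int m = x >> 31;   // 0 or -1
         return m ^ x;      // x if x >= 0, ~x otherwise
       }
  */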
  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          imode = int_mode_for_mode (mode);
          if (imode == BLKmode)
            return NULL_RTX;
          op1 = gen_lowpart (imode, op1);
        }
      else
        {
          int word;

          imode = word_mode;
          if (FLOAT_WORDS_BIG_ENDIAN)
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
          else
            word = bitpos / BITS_PER_WORD;
          bitpos = bitpos % BITS_PER_WORD;
          op1 = operand_subword_force (op1, word, mode);
        }

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
                           immed_wide_int_const (mask, imode),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
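/* For illustration: for IEEE single precision the mask manipulation below
   corresponds to this C sketch (needs <string.h>):

     float copysignf_bits (float x, float y)
     {
       unsigned ux, uy;
       memcpy (&ux, &x, sizeof ux);
       memcpy (&uy, &y, sizeof uy);
       ux = (ux & 0x7fffffffu)      // op0 & ~mask: drop x's sign
            | (uy & 0x80000000u);   // op1 &  mask: take y's sign
       memcpy (&x, &ux, sizeof x);
       return x;
     }
*/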
static rtx
expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece
                  = expand_binop (imode, and_optab, op0_piece,
                                  immed_wide_int_const (~mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_wide_int_const (mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_wide_int_const (mask, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_wide_int_const (~mask, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
                      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list, or if it sets a result (presumably a hard
              register) that is set or clobbered by a previous insn.
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
              SET_DEST perform the former check on the address, and the latter
              check on the MEM.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_in_p (SET_DEST (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
                      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
            if (note)
              {
                int lp_nr = INTVAL (XEXP (note, 0));
                if (lp_nr == 0 || lp_nr == INT_MIN)
                  remove_note (insn, note);
              }
          }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
         reg note to indicate that this call cannot throw or execute a nonlocal
         goto (unless there is already a REG_EH_REGION note, in which case
         we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                SET_PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
               enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        return 1;
      if (purpose == ccp_store_flag
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 1, test))
        return 1;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
                  int unsignedp, enum optab_methods methods,
                  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
              || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

#if HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((CONST_INT_P (size)
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
          *pmode = result_mode;
          return;
        }

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
        goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = copy_to_reg (x);
      if (may_trap_p (y))
        y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
                  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        {
          rtx_insn *last = get_last_insn ();
          rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
          rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
          if (op0 && op1
              && insn_operand_matches (icode, 1, op0)
              && insn_operand_matches (icode, 2, op1))
            {
              XEXP (test, 0) = op0;
              XEXP (test, 1) = op1;
              *ptest = test;
              *pmode = cmp_mode;
              return;
            }
          delete_insns_since (last);
        }

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
        break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp)
        {
          rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
          if (ulibfunc)
            libfunc = ulibfunc;
        }

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                        ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent LTU.
         The comparisons in the fixed-point helper library are always
         biased.  */
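      /* For illustration: with a biased routine R (0 = less, 1 = equal,
         2 = greater), 'x < y' can be tested as 'R (x, y) < 1'; with an
         unbiased routine (-1/0/1) the signed test compares against 0,
         and the unsigned case first adds 1 so that the -1 result wraps
         to 0 and 'R + 1 < 1' expresses LTU.  */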
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
        {
          if (unsignedp)
            x = plus_constant (ret_mode, result, 1);
          else
            y = const0_rtx;
        }

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
                        ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
                 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
        return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
        return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
                          profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
                                          XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, rtx label,
                         profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
                    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  std::swap (x, y);
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
	{
	case EQ:
	  true_rtx = const0_rtx;
	  false_rtx = const_true_rtx;
	  break;

	case NE:
	  true_rtx = const_true_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GT:
	  true_rtx = const1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case GE:
	  true_rtx = const0_rtx;
	  false_rtx = constm1_rtx;
	  break;

	case LT:
	  true_rtx = constm1_rtx;
	  false_rtx = const0_rtx;
	  break;

	case LE:
	  true_rtx = const0_rtx;
	  false_rtx = const1_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      struct expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
	target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
	 punt and let the caller figure out how best to deal with this
	 situation.  */
      if (COMPARISON_P (comparison))
	{
	  saved_pending_stack_adjust save;
	  save_pending_stack_adjust (&save);
	  last = get_last_insn ();
	  do_pending_stack_adjust ();
	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
			    GET_CODE (comparison), NULL_RTX, unsignedp,
			    OPTAB_WIDEN, &comparison, &cmode);
	  if (comparison)
	    {
	      struct expand_operand ops[4];

	      create_output_operand (&ops[0], target, mode);
	      create_fixed_operand (&ops[1], comparison);
	      create_input_operand (&ops[2], op2, mode);
	      create_input_operand (&ops[3], op3, mode);
	      if (maybe_expand_insn (icode, 4, ops))
		{
		  if (ops[0].value != target)
		    convert_move (target, ops[0].value, false);
		  return target;
		}
	    }
	  delete_insns_since (last);
	  restore_pending_stack_adjust (&save);
	}

      if (pass == 1)
	return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
	 operand order, perhaps it will expand successfully.  */
      if (swapped)
	code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
							   NULL))
	       != UNKNOWN)
	code = reversed;
      else
	return NULL_RTX;
      std::swap (op2, op3);
    }
}
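/* For illustration (a hypothetical caller, not code from this file):
   expanding "x = (a < b) ? c : d" on a target with a movcc pattern might
   look like the following, with all operands assumed to be SImode pseudos:

     rtx res = emit_conditional_move (x, LT, a, b, SImode,
				      c, d, SImode, 0);
     if (!res)
       ... fall back to a compare, a branch and two moves ...

   The NULL_RTX return is the signal to take the branching fallback.  */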
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
				    machine_mode mode, rtx cond, rtx op1,
				    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
	convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
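/* For illustration: "x = cond ? -y : z" maps onto the negcc pattern via a
   hypothetical call such as

     emit_conditional_neg_or_complement (x, NEG, mode, cond, y, z);

   Note that operand 1 is created with create_fixed_operand, so COND must
   already be a prepared comparison rtx; the caller canonicalizes it
   first.  */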
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
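/* For illustration: gen_add2_insn (x, y) produces the body for "x += y";
   e.g. for two DImode registers it returns whatever the target's adddi3
   generator builds for (set x (plus x y)).  It is the caller's job to
   emit the result, for instance via emit_insn.  */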
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}
/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp && can_do_signed)
    {
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	  break;

      if (fmode == VOIDmode)
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = GET_MODE (to);

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (GET_MODE (from)))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = GET_MODE (from);
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
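/* A concrete instance of the signed-conversion fixup above: converting
   the unsigned 32-bit value 0x80000000 to DFmode via the signed path
   first yields -2147483648.0, and the branch on FROM < 0 then adds
   2**32 = 4294967296.0, giving the correct 2147483648.0.  */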
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For other inputs overflow happens and the result is
     undefined.)  So we know that the most important bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
	  && (!DECIMAL_FLOAT_MODE_P (fmode)
	      || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit;
	  rtx_code_label *lab1, *lab2;
	  rtx_insn *insn;

	  bitsize = GET_MODE_PRECISION (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1, fmode);
	  limit = const_double_from_real_value (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (targetm.gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 (HOST_WIDE_INT_1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_dst_reg_note (insn, REG_EQUAL,
				gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
					       copy_rtx (from)),
				to);
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
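/* Worked instance of the subtract-and-xor sequence above: for a DFmode
   value V in [2**63, 2**64) being fixed to unsigned DImode, the code
   branches on V >= LIMIT (= 2**63), computes V - 2**63 — exact, since it
   merely clears the top mantissa bit — converts that to a signed integer,
   and XORs in HOST_WIDE_INT_1 << 63 to restore the high bit.  */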
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  machine_mode mode = GET_MODE (arg);
  machine_mode arg_mode;
  if (SCALAR_INT_MODE_P (mode))
    {
      /* If we need to promote the integer function argument we need to do
	 it here instead of inside emit_library_call_value because in
	 emit_library_call_value we don't know if we should do a signed or
	 unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
					&unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
	return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
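/* For illustration: from a gdb session attached to cc1, the tables above
   can be dumped with

     (gdb) call debug_optab_libfuncs ()

   which prints lines such as "plus\tSF:\t__addsf3" (the exact names
   depend on the target's libfunc setup).  */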
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    case BIT_AND_EXPR:
      code = AND;
      break;

    case BIT_IOR_EXPR:
      code = IOR;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
		    tree t_op0, tree t_op1, bool unsignedp,
		    enum insn_code icode, unsigned int opno)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
   vec_perm operand, assuming the second operand is a constant vector of zeroes.
   Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
   shift.  */
static rtx
shift_amt_for_vec_perm_mask (rtx sel)
{
  unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));

  if (GET_CODE (sel) != CONST_VECTOR)
    return NULL_RTX;

  first = INTVAL (CONST_VECTOR_ELT (sel, 0));
  if (first >= nelt)
    return NULL_RTX;
  for (i = 1; i < nelt; i++)
    {
      int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
      unsigned int expected = i + first;
      /* Indices into the second vector are all equivalent.  */
      if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
	return NULL_RTX;
    }

  return GEN_INT (first * bitsize);
}
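/* Example of the check above: with NELT == 4, the constant selector
   {2, 3, 4, 5} describes a shift — element I comes from index I + 2, and
   indices 4 and 5 fall in the second (all-zero) operand — so the result
   is GEN_INT (2 * BITSIZE).  A selector like {2, 3, 3, 5} is rejected at
   I == 2, where the expected index 4 points into the zero vector but the
   actual index 3 does not.  */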
/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Generate instructions for vec_perm optab given its mode
   and three operands.  */

rtx
expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      /* See if this can be handled with a vec_shr.  We only do this if the
	 second vector is all zeroes.  */
      enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
      enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
				      ? optab_handler (vec_shr_optab, qimode)
				      : CODE_FOR_nothing);
      rtx shift_amt = NULL_RTX;
      if (v1 == CONST0_RTX (GET_MODE (v1))
	  && (shift_code != CODE_FOR_nothing
	      || shift_code_qi != CODE_FOR_nothing))
	{
	  shift_amt = shift_amt_for_vec_perm_mask (sel);
	  if (shift_amt)
	    {
	      struct expand_operand ops[3];
	      if (shift_code != CODE_FOR_nothing)
		{
		  create_output_operand (&ops[0], target, mode);
		  create_input_operand (&ops[1], v0, mode);
		  create_convert_operand_from_type (&ops[2], shift_amt,
						    sizetype);
		  if (maybe_expand_insn (shift_code, 3, ops))
		    return ops[0].value;
		}
	      if (shift_code_qi != CODE_FOR_nothing)
		{
		  tmp = gen_reg_rtx (qimode);
		  create_output_operand (&ops[0], tmp, qimode);
		  create_input_operand (&ops[1], gen_lowpart (qimode, v0),
					qimode);
		  create_convert_operand_from_type (&ops[2], shift_amt,
						    sizetype);
		  if (maybe_expand_insn (shift_code_qi, 3, ops))
		    return gen_lowpart (mode, ops[0].value);
		}
	    }
	}

      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
	      tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   NULL, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   NULL, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte of each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
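/* A small instance of the constant byte-based fallback above: permuting
   V4SI vectors (U == 4) turns the element selector {1, 2, 4, 7} into the
   QImode selector {4,5,6,7, 8,9,10,11, 16,17,18,19, 28,29,30,31} — each
   element index is masked to 2*E-1, scaled by the unit size and expanded
   to U consecutive byte indices.  */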
/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
			   rtx target)
{
  struct expand_operand ops[4];
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
  enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
  rtx mask, rtx_op1, rtx_op2;

  if (icode == CODE_FOR_nothing)
    return 0;

  mask = expand_normal (op0);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  mask = force_reg (mask_mode, mask);
  rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_input_operand (&ops[3], mask, mask_mode);
  expand_insn (icode, 4, ops);

  return ops[0].value;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
      if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
	  != CODE_FOR_nothing)
	return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
					  op2, target);
      /* Fake op0 < 0.  */
      else
	{
	  gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
		      == MODE_VECTOR_INT);
	  op0a = op0;
	  op0b = build_zero_cst (TREE_TYPE (op0));
	  tcode = LT_EXPR;
	}
    }
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vcond_eq_icode (mode, cmp_op_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
				   icode, 4);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
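/* For illustration: a gimple statement such as "r = a > b ? c : d" on
   V4SImode operands reaches this function with OP0 the comparison a > b,
   and typically expands through the target's vcondv4siv4si pattern with
   operands {r, c, d, (gt a b), a, b} — matching the six operands created
   above.  */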
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  struct expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
	return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
				   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;
  rtvec v;
  rtx perm;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
				    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
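/* Worked instance of the method-2 (even/odd) recombination above: for
   V4SI on a little-endian target, M1 holds the widened even products
   {p0, p2} and M2 the odd products {p1, p3}, viewed as V4SI as
   {lo0, hi0, lo2, hi2} and {lo1, hi1, lo3, hi3}.  The selector built in
   the loop is {1, 5, 3, 7}, which picks out {hi0, hi1, hi2, hi3} — the
   desired highpart result.  */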
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, 2, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy _sync_lock test_and_set operation which is
   generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
                                rtx mem, rtx expected, rtx desired,
                                bool is_weak, enum memmodel succ_model,
                                enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
        ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
          || (target_bool = *ptarget_bool) == NULL
          || GET_MODE (target_bool) != bool_mode)
        target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
        {
          /* Return success/failure.  */
          target_bool = ops[0].value;
          target_oval = ops[1].value;
          goto success;
        }
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
        return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
         skip the computation of it.  */
      if (ptarget_bool == NULL)
        goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
        note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
        {
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
                                               const0_rtx, VOIDmode, 0, 1);
          goto success;
        }
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, 3, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;

  if (ptarget_bool)
    *ptarget_bool = target_bool;

  return true;
}
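/* Illustrative sketch of a call site (assumed, not from this file):
   expanding a strong __atomic_compare_exchange_n could look like

     rtx bool_ret = NULL_RTX, oldval = NULL_RTX;
     if (!expand_atomic_compare_and_swap (&bool_ret, &oldval, mem,
                                          expected, desired,
                                          false, MEMMODEL_SEQ_CST,
                                          MEMMODEL_SEQ_CST))
       ... fall back to a library call ...

   where FALSE requests the strong variant.  On success BOOL_RET holds
   the rtx of the success flag and OLDVAL the value previously in MEM.  */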
/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
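/* This is the rtl equivalent of the classic source-level compiler
   barrier

     __asm__ __volatile__ ("" : : : "memory");

   it emits no machine instruction, but the volatile asm plus the
   BLKmode clobber keep the optimizers from moving memory accesses
   across this point.  */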
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (targetm.have_mem_thread_fence ())
    emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      if (targetm.have_memory_barrier ())
        emit_insn (targetm.gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
        emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
        expand_asm_memory_barrier ();
    }
}
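/* For example (sketch), expanding __atomic_thread_fence (__ATOMIC_RELEASE)
   reduces to

     expand_mem_thread_fence (MEMMODEL_RELEASE);

   which uses the target's mem_thread_fence pattern when available and
   otherwise, for non-relaxed models, falls back to a legacy memory
   barrier, a synchronize libcall, or the plain asm barrier above.  */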
/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  if (targetm.have_mem_signal_fence ())
    emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
         handler running on the same thread.  Thus this really becomes a
         compiler barrier, in that stores must not be sunk past
         (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     such an emulation would perform could be incorrect if this is a
     volatile atomic load or one targeting read-only-mapped memory.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
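/* A sketch of the barrier placement in the move-based fallback above:
   a seq-cst atomic load of X expands roughly to

     fence; target = X; fence;

   while weaker models get only the trailing fence, which in turn
   degenerates to nothing for relaxed loads.  */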
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
         we can try an atomic_exchange and throw away the result.  Otherwise,
         don't do anything so that we do not create an inconsistency between
         loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
        {
          rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
          if (!target)
            target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
                                                                val);
          if (target)
            return const0_rtx;
        }
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
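/* Sketch of the over-wide case above: a store too large for a single
   atomic move is rewritten as an exchange whose result is discarded,
   morally

     (void) __atomic_exchange_n (&x, val, model);

   and this is attempted only when loads of the same size are also
   expanded inline, so loads and stores of one object stay consistent.  */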
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};

/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
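/* For example, REVERSE_CODE records how to recover the fetch-before
   value from the fetch-after value (and vice versa):

     fetch_add (&x, v)  ==  add_fetch (&x, v) - v

   so PLUS pairs with MINUS, XOR is its own inverse, and AND, IOR and
   NAND have no inverse, hence UNKNOWN.  */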
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
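/* At the source level the two rewrites above correspond to (sketch):

     __atomic_fetch_and (&x, 0, m)   ->  __atomic_exchange_n (&x, 0, m)
     __atomic_fetch_or (&x, -1, m)   ->  __atomic_exchange_n (&x, -1, m)

   since ANDing with zero or ORing with all-ones stores a known
   constant regardless of the old value.  */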
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
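/* For reference, the operand layouts prepared above are (sketch):

     atomic_OP                    mem, val, model       (3 operands)
     atomic_{fetch_OP,OP_fetch}   res, mem, val, model  (4 operands)
     sync_OP                      mem, val              (2 operands)
     sync_{old,new}_OP            res, mem, val         (3 operands)

   which is why NUM_OPS varies between 2 and 4.  */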
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
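/* The compensation identities used above, written out (sketch):

     OP_fetch (&x, v)  ==  fetch_OP (&x, v) OP v
     fetch_OP (&x, v)  ==  OP_fetch (&x, v) REVERSE_OP v

   NAND is the exception: having no reverse, its OP_fetch form is
   rebuilt from the fetch-before value as ~(before & v).  */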
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          rtx_insn *insns = get_insns ();
          end_sequence ();
          emit_insn (insns);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            2, addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
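/* The final compare-and-swap fallback amounts to the familiar loop,
   sketched in pseudo-C:

     T oldval = *mem, newval;
     do
       newval = oldval OP val;
     while (!compare_exchange (mem, &oldval, newval));

   with T0 playing the role of OLDVAL and T1 of NEWVAL in the rtl
   sequence handed to expand_compare_and_swap_loop.  */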
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx mem = op->value;
      rtx addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          rtx_insn *last;
          machine_mode mode;

          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        return true;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  unsigned int i;

  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}
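/* Typical usage of this operand machinery, as seen throughout this
   file (sketch):

     struct expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_fixed_operand (&ops[1], mem);
     create_integer_operand (&ops[2], model);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;

   maybe_expand_insn pushes each operand through
   maybe_legitimize_operand before invoking the generator.  */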
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}