/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
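/* For example, when a PLUS has been expanded into a multi-insn sequence,
   the insn that finally sets TARGET gets the note (REG_EQUAL (plus OP0 OP1)),
   letting later passes such as CSE see the overall value without rederiving
   it from the sequence (an illustrative summary of the code below).  */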
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;
  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;
  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;
  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }
  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;
  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;
  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0),
			   copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */
static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
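/* E.g. when widening the operands of a bitwise AND from HImode to SImode
   it is enough to take a paradoxical lowpart SUBREG and leave the upper
   bits undefined, since only the low 16 bits of the result will be used;
   a right shift, by contrast, must really be extended (an illustrative
   note on the cases handled below).  */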
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or
     paradoxical SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:

   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */
bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
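/* E.g. for a 64-bit logical right shift by a count known to be >= 32 on a
   32-bit target, INTO_TARGET is OUTOF_INPUT (the high input word) shifted
   by the reduced count SUPERWORD_OP1, and OUTOF_TARGET is simply zero
   (or copies of the sign bit for an arithmetic shift); an illustrative
   sketch of the code below.  */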
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
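/* As an illustration, for a 64-bit left shift by OP1 < 32 on a 32-bit
   target the code below computes

     into_target  = (into_input << OP1) | (outof_input >> (32 - OP1));
     outof_target = outof_input << OP1;

   with the (32 - OP1) carry shift arranged so that no word-mode shift
   count ever reaches BITS_PER_WORD.  */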
static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
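/* Roughly, the code below tries three strategies in turn: if the
   subword/superword condition can be evaluated at compile time it calls
   the appropriate helper directly; otherwise it tries a branch-free
   sequence using conditional moves; and as a last resort it emits an
   explicit compare-and-branch between the two cases.  */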
static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	*		      [__op1_high_|__op1_low__]
       _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
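/* A small worked example of the adjustment (with an illustrative 4-bit
   word): if op0_low is 0b1001, a signed widening multiply reads it as
   9 - 16 = -7, so the low product (1) is short by 16 * op1_low, i.e. by
   op1_low in the high word.  Logically shifting op0_low right by
   BITS_PER_WORD - 1 = 3 yields exactly that 0-or-1 correction, which is
   added to op0_high before it is used to compute partial product (2b).  */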
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */
static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
						      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
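/* The body below is a cascade of strategies: a direct insn (possibly
   after rotating the other way or broadcasting a scalar shift count into
   a vector), open-coding in a wider mode, word-at-a-time expansion for
   logical operations, synthesized doubleword shifts, rotates, additions
   and multiplications, a library call, and finally recursive widening.  */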
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);
1124 the negated constant. */
1126 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1128 op1
= negate_rtx (mode
, op1
);
1129 binoptab
= add_optab
;
1131 /* For shifts, constant invalid op1 might be expanded from different
1132 mode than MODE. As those are invalid, force them to a register
1133 to avoid further problems during expansion. */
1134 else if (CONST_INT_P (op1
)
1135 && shift_optab_p (binoptab
)
1136 && UINTVAL (op1
) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode
)))
1138 op1
= gen_int_mode (INTVAL (op1
), GET_MODE_INNER (mode
));
1139 op1
= force_reg (GET_MODE_INNER (mode
), op1
);
  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
				      widened_mode (mode, op0, op1), 1)
	    != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
				    unsignedp, methods, last);
      if (temp)
	return temp;
    }
  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
	newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  machine_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (inner_mode)
		  < GET_MODE_BITSIZE (GET_MODE (op1))))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode, 0)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }
1442 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1443 && mclass
== MODE_INT
1444 && CONST_INT_P (op1
)
1445 && GET_MODE_PRECISION (mode
) == 2 * BITS_PER_WORD
1446 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1447 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1450 rtx into_target
, outof_target
;
1451 rtx into_input
, outof_input
;
1453 int shift_count
, left_shift
, outof_word
;
1455 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1456 won't be accurate, so use a new target. Do this also if target is not
1457 a REG, first because having a register instead may open optimization
1458 opportunities, and second because if target and op0 happen to be MEMs
1459 designating the same location, we would risk clobbering it too early
1460 in the code sequence we generate below. */
1465 || !valid_multiword_target_p (target
))
1466 target
= gen_reg_rtx (mode
);
1470 shift_count
= INTVAL (op1
);
1472 /* OUTOF_* is the word we are shifting bits away from, and
1473 INTO_* is the word that we are shifting bits towards, thus
1474 they differ depending on the direction of the shift and
1475 WORDS_BIG_ENDIAN. */
1477 left_shift
= (binoptab
== rotl_optab
);
1478 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1480 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1481 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1483 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1484 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1486 if (shift_count
== BITS_PER_WORD
)
1488 /* This is just a word swap. */
1489 emit_move_insn (outof_target
, into_input
);
1490 emit_move_insn (into_target
, outof_input
);
1495 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1496 rtx first_shift_count
, second_shift_count
;
1497 optab reverse_unsigned_shift
, unsigned_shift
;
1499 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1500 ? lshr_optab
: ashl_optab
);
1502 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1503 ? ashl_optab
: lshr_optab
);
1505 if (shift_count
> BITS_PER_WORD
)
1507 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1508 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1512 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1513 second_shift_count
= GEN_INT (shift_count
);
1516 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1517 outof_input
, first_shift_count
,
1518 NULL_RTX
, unsignedp
, next_methods
);
1519 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1520 into_input
, second_shift_count
,
1521 NULL_RTX
, unsignedp
, next_methods
);
1523 if (into_temp1
!= 0 && into_temp2
!= 0)
1524 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1525 into_target
, unsignedp
, next_methods
);
1529 if (inter
!= 0 && inter
!= into_target
)
1530 emit_move_insn (into_target
, inter
);
1532 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1533 into_input
, first_shift_count
,
1534 NULL_RTX
, unsignedp
, next_methods
);
1535 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1536 outof_input
, second_shift_count
,
1537 NULL_RTX
, unsignedp
, next_methods
);
1539 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1540 inter
= expand_binop (word_mode
, ior_optab
,
1541 outof_temp1
, outof_temp2
,
1542 outof_target
, unsignedp
, next_methods
);
1544 if (inter
!= 0 && inter
!= outof_target
)
1545 emit_move_insn (outof_target
, inter
);
1548 insns
= get_insns ();
1558 /* These can be done a word at a time by propagating carries. */
1559 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1560 && mclass
== MODE_INT
1561 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1562 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1565 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1566 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1567 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1568 rtx xop0
, xop1
, xtarget
;
1570 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1571 value is one of those, use it. Otherwise, use 1 since it is the
1572 one easiest to get. */
1573 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1574 int normalizep
= STORE_FLAG_VALUE
;
1579 /* Prepare the operands. */
1580 xop0
= force_reg (mode
, op0
);
1581 xop1
= force_reg (mode
, op1
);
1583 xtarget
= gen_reg_rtx (mode
);
1585 if (target
== 0 || !REG_P (target
) || !valid_multiword_target_p (target
))
1588 /* Indicate for flow that the entire target reg is being set. */
1590 emit_clobber (xtarget
);
1592 /* Do the actual arithmetic. */
1593 for (i
= 0; i
< nwords
; i
++)
1595 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1596 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1597 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1598 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1601 /* Main add/subtract of the input operands. */
1602 x
= expand_binop (word_mode
, binoptab
,
1603 op0_piece
, op1_piece
,
1604 target_piece
, unsignedp
, next_methods
);
1610 /* Store carry from main add/subtract. */
1611 carry_out
= gen_reg_rtx (word_mode
);
1612 carry_out
= emit_store_flag_force (carry_out
,
1613 (binoptab
== add_optab
1616 word_mode
, 1, normalizep
);
1623 /* Add/subtract previous carry to main result. */
1624 newx
= expand_binop (word_mode
,
1625 normalizep
== 1 ? binoptab
: otheroptab
,
1627 NULL_RTX
, 1, next_methods
);
1631 /* Get out carry from adding/subtracting carry in. */
1632 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1633 carry_tmp
= emit_store_flag_force (carry_tmp
,
1634 (binoptab
== add_optab
1637 word_mode
, 1, normalizep
);
1639 /* Logical-ior the two poss. carry together. */
1640 carry_out
= expand_binop (word_mode
, ior_optab
,
1641 carry_out
, carry_tmp
,
1642 carry_out
, 0, next_methods
);
1646 emit_move_insn (target_piece
, newx
);
1650 if (x
!= target_piece
)
1651 emit_move_insn (target_piece
, x
);
1654 carry_in
= carry_out
;
1657 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1659 if (optab_handler (mov_optab
, mode
) != CODE_FOR_nothing
1660 || ! rtx_equal_p (target
, xtarget
))
1662 rtx_insn
*temp
= emit_move_insn (target
, xtarget
);
1664 set_dst_reg_note (temp
, REG_EQUAL
,
1665 gen_rtx_fmt_ee (optab_to_code (binoptab
),
1666 mode
, copy_rtx (xop0
),
1677 delete_insns_since (last
);
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && widening_optab_handler (smul_widen_optab, mode, word_mode)
	     != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
	    {
	      rtx_insn *move = emit_move_insn (target ? target : product,
					       product);
	      set_dst_reg_note (move,
				REG_EQUAL,
				gen_rtx_fmt_ee (MULT, mode,
						copy_rtx (op0),
						copy_rtx (op1)),
				target ? target : product);
	    }
	  return product;
	}
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
	      != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
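/* E.g. an unsigned HImode division can be widened to a signed SImode
   division: after zero-extension both operands are non-negative, so the
   signed SImode insn produces the same quotient (an illustrative case;
   the fallback order is implemented below).  */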
rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  swap_optab_enable (soptab, mode, true);
  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
1916 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1919 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1920 enum mode_class mclass
;
1921 machine_mode wider_mode
;
1922 rtx_insn
*entry_last
= get_last_insn ();
1925 mclass
= GET_MODE_CLASS (mode
);
1928 targ0
= gen_reg_rtx (mode
);
1930 targ1
= gen_reg_rtx (mode
);
1932 /* Record where to go back to if we fail. */
1933 last
= get_last_insn ();
1935 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
1937 struct expand_operand ops
[3];
1938 enum insn_code icode
= optab_handler (unoptab
, mode
);
1940 create_fixed_operand (&ops
[0], targ0
);
1941 create_fixed_operand (&ops
[1], targ1
);
1942 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
1943 if (maybe_expand_insn (icode
, 3, ops
))
1947 /* It can't be done in this mode. Can we do it in a wider mode? */
1949 if (CLASS_HAS_WIDER_MODES_P (mclass
))
1951 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
1953 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
1955 rtx t0
= gen_reg_rtx (wider_mode
);
1956 rtx t1
= gen_reg_rtx (wider_mode
);
1957 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1959 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1961 convert_move (targ0
, t0
, unsignedp
);
1962 convert_move (targ1
, t1
, unsignedp
);
1966 delete_insns_since (last
);
1971 delete_insns_since (entry_last
);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
	return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */

static rtx
widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx xop0, temp;
	      rtx_insn *last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      xop0 = widen_operand (op0, wider_mode, mode,
				    unoptab != clrsb_optab, false);
	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unoptab != clrsb_optab);
	      if (temp != 0)
		temp = expand_binop
		  (wider_mode, sub_optab, temp,
		   gen_int_mode (GET_MODE_PRECISION (wider_mode)
				 - GET_MODE_PRECISION (mode),
				 wider_mode),
		   target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
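
/* Illustration (a sketch for exposition, not GCC code; the example_* name
   is hypothetical): a narrow clz computed via a wider clz, subtracting the
   extra leading zeros that the zero-extension introduces.  */
#if 0
static inline int
example_clz16 (unsigned short x)	/* x != 0 assumed, as for clz.  */
{
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}
#endif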
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */

static rtx
expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;

  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
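
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical)
   of the selection above, on a 64-bit value made of two 32-bit words.  */
#if 0
#include <stdint.h>

static inline int
example_clz64 (uint64_t x)		/* x != 0 assumed, as for clz.  */
{
  uint32_t hi = (uint32_t) (x >> 32), lo = (uint32_t) x;
  /* Nonzero high word: its clz is the answer.  Otherwise add the width
     of the high word to the low word's clz.  */
  return hi ? __builtin_clz (hi) : 32 + __builtin_clz (lo);
}
#endif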
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */

static rtx
expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 0, mode), NULL_RTX,
			   true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 1, mode), NULL_RTX,
			   true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, 0);
  emit_insn (seq);
  return t;
}
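
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   the two word-sized popcounts are independent and simply summed.  */
#if 0
#include <stdint.h>

static inline int
example_popcount64 (uint64_t x)
{
  return __builtin_popcount ((uint32_t) x)
	 + __builtin_popcount ((uint32_t) (x >> 32));
}
#endif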
/* Try calculating
	(parity:wide x)
   as
	(parity:narrow (low (x) ^ high (x)))  */

static rtx
expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
			operand_subword_force (op0, 0, mode),
			operand_subword_force (op0, 1, mode),
			NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */

static rtx
widen_bswap (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx x;
  rtx_insn *last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  FOR_EACH_WIDER_MODE (wider_mode, mode)
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));

  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      GET_MODE_BITSIZE (wider_mode)
		      - GET_MODE_BITSIZE (mode),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
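
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   byte-swap each word, then exchange the two words.  */
#if 0
#include <stdint.h>

static inline uint64_t
example_bswap64 (uint64_t x)
{
  uint64_t lo = __builtin_bswap32 ((uint32_t) x);
  uint64_t hi = __builtin_bswap32 ((uint32_t) (x >> 32));
  return (lo << 32) | hi;	/* the old low word becomes the high word */
}
#endif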
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      FOR_EACH_MODE_FROM (wider_mode, mode)
	{
	  if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx xop0, temp;
	      rtx_insn *last;

	      last = get_last_insn ();

	      if (target == 0 || GET_MODE (target) != wider_mode)
		target = gen_reg_rtx (wider_mode);

	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);

	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    return convert_to_mode (mode, temp, 0);
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }
  return 0;
}
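
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical)
   combining the two parity reductions above: XOR of the halves preserves
   parity, and popcount & 1 extracts it.  */
#if 0
#include <stdint.h>

static inline int
example_parity64 (uint64_t x)
{
  uint32_t folded = (uint32_t) x ^ (uint32_t) (x >> 32);
  return __builtin_popcount (folded) & 1;
}
#endif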
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (machine_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
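
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   x & -x isolates the lowest set bit, and clz then locates it from the
   other end.  */
#if 0
static inline int
example_ctz32 (unsigned int x)		/* x != 0 assumed, as for ctz.  */
{
  return 31 - __builtin_clz (x & -x);
}
#endif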
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */

static rtx
expand_ffs (machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
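
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   ffs patches the undefined-at-zero ctz result to -1, then shifts the
   0-based bit index to 1-based.  */
#if 0
static inline int
example_ffs32 (unsigned int x)
{
  int t = (x == 0) ? -1 : __builtin_ctz (x);	/* the test-and-branch */
  return t + 1;					/* 0 for x == 0 */
}
#endif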
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
			   machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
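
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   negation and absolute value done on the integer image of an IEEE float,
   using a sign-bit mask as above.  */
#if 0
#include <stdint.h>
#include <string.h>

static inline float
example_absneg (float x, int code_is_abs)
{
  uint32_t bits, mask = UINT32_C (1) << 31;
  memcpy (&bits, &x, sizeof bits);
  if (code_is_abs)
    bits &= ~mask;	/* ABS: clear the sign bit (the and_optab case).  */
  else
    bits ^= mask;	/* NEG: flip the sign bit (the xor_optab case).  */
  memcpy (&x, &bits, sizeof bits);
  return x;
}
#endif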
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */

static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_clz (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
	return temp;
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (mode, op0, target);
      if (temp)
	return temp;
    }

  if (unoptab == parity_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
				   unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
				   unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      temp = widen_bswap (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_bswap (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  temp = expand_absneg_bit (NEG, mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
	eq_value = NULL_RTX;
      else
	{
	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
	  if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
	  else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
	    eq_value = simplify_gen_unary (ZERO_EXTEND,
					   outmode, eq_value, mode);
	}
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		temp = expand_binop
		  (wider_mode, sub_optab, temp,
		   gen_int_mode (GET_MODE_PRECISION (wider_mode)
				 - GET_MODE_PRECISION (mode),
				 wider_mode),
		   target, true, OPTAB_DIRECT);

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  gcc_assert (GET_MODE_PRECISION (wider_mode)
			      == GET_MODE_BITSIZE (wider_mode)
			      && GET_MODE_PRECISION (mode)
				 == GET_MODE_BITSIZE (mode));

		  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
				       GET_MODE_BITSIZE (wider_mode)
				       - GET_MODE_BITSIZE (mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
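
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   on 16 bits a byte swap is exactly a rotate by 8 in either direction,
   which is why the HImode case above tries the rotate optabs first.  */
#if 0
static inline unsigned short
example_bswap16 (unsigned short x)
{
  return (unsigned short) ((x << 8) | (x >> 8));
}
#endif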
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   GET_MODE_PRECISION (mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
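
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical)
   of the branch-free sequence above.  The right shift of a negative value
   is implementation-defined in C but is arithmetic for GCC.  */
#if 0
static inline int
example_abs32 (int x)
{
  int s = x >> 31;		/* 0 for x >= 0, -1 for x < 0 */
  return (x ^ s) - s;		/* overflows only for INT_MIN, like -x */
}
#endif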
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL, op1,
			   profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   GET_MODE_PRECISION (mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  imode = int_mode_for_mode (mode);
	  if (imode == BLKmode)
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;

      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_wide_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
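
/* Illustration (a sketch, not GCC code; the example_* name is hypothetical):
   the single-word case above, done on the integer image of an IEEE float.  */
#if 0
#include <stdint.h>
#include <string.h>

static inline float
example_copysignf (float x, float y)
{
  uint32_t xb, yb, mask = UINT32_C (1) << 31;
  memcpy (&xb, &x, sizeof xb);
  memcpy (&yb, &y, sizeof yb);
  xb = (xb & ~mask) | (yb & mask);	/* magnitude of X, sign of Y */
  memcpy (&x, &xb, sizeof xb);
  return x;
}
#endif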
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */

static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    ;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      FOR_EACH_MODE_IN_CLASS (cmp_mode, MODE_INT)
	{
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((CONST_INT_P (size)
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
		  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.

	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
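
/* Illustration (a sketch added for exposition; __cmpdi2 is the biased
   libgcc comparison helper reached through cmp_optab): a biased routine
   returns 0/1/2 for <, ==, >, so the original comparison is recovered by
   comparing the result against 1.  */
#if 0
extern int __cmpdi2 (long long, long long);	/* returns 0, 1 or 2 */

static inline int
example_lt (long long a, long long b)
{
  return __cmpdi2 (a, b) < 1;			/* a < b */
}
#endif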
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
		 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
	return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
	return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
			  profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
					  XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
			 machine_mode mode, int unsignedp, rtx label,
			 profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
		    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
                       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
          && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
        break;

      if (code_to_optab (swapped)
          && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
        {
          std::swap (x, y);
          comparison = swapped;
          break;
        }

      if (code_to_optab (reversed)
          && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
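
/* Sketch of the lowering this produces for "if (a < b)" on a soft-float
   target (illustrative only; the libcall chosen depends on the mode, and
   the documented libgcc convention is that __ltsf2 returns a value less
   than zero iff neither argument is a NaN and a < b):

     int t = __ltsf2 (a, b);
     if (t < 0)
       ...                                                                */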
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      struct expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       machine_mode cmode, rtx op2, rtx op3,
                       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */
  if (rtx_equal_p (op2, op3))
    {
      if (!target)
        target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
         punt and let the caller figure out how best to deal with this
         situation.  */
      if (COMPARISON_P (comparison))
        {
          saved_pending_stack_adjust save;
          save_pending_stack_adjust (&save);
          last = get_last_insn ();
          do_pending_stack_adjust ();
          prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                            GET_CODE (comparison), NULL_RTX, unsignedp,
                            OPTAB_WIDEN, &comparison, &cmode);
          if (comparison)
            {
              struct expand_operand ops[4];

              create_output_operand (&ops[0], target, mode);
              create_fixed_operand (&ops[1], comparison);
              create_input_operand (&ops[2], op2, mode);
              create_input_operand (&ops[3], op3, mode);
              if (maybe_expand_insn (icode, 4, ops))
                {
                  if (ops[0].value != target)
                    convert_move (target, ops[0].value, false);
                  return target;
                }
            }
          delete_insns_since (last);
          restore_pending_stack_adjust (&save);
        }

      if (pass == 1)
        return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
         operand order, perhaps it will expand successfully.  */
      if (swapped)
        code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
                                                           NULL))
               != UNKNOWN)
        code = reversed;
      else
        return NULL_RTX;
      std::swap (op2, op3);
    }
}
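
/* Illustrative use (a sketch, not from a real caller): to expand
   "x = (a < b) ? c : d" in SImode one might write

     rtx x = emit_conditional_move (NULL_RTX, LT, a, b, SImode,
                                    c, d, SImode, 0);

   and fall back to a compare-and-branch sequence when NULL_RTX comes
   back because the target lacks a usable movcc pattern.  */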
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
                                    machine_mode mode, rtx cond, rtx op1,
                                    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
        convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      machine_mode cmode, rtx op2, rtx op3,
                      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
                    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
        {
          if (ops[0].value != target)
            convert_move (target, ops[0].value, false);
          return target;
        }
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate and return an insn body to add Y and Z, storing the
   result in X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
                 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp && can_do_signed)
    {
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
        if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_PRECISION (GET_MODE (from)))
            {
              rtx temp1;
              rtx_code_label *neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (targetm.gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
                           const_double_from_real_value (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
  {
    rtx libfunc;
    rtx_insn *insns;
    rtx value;
    convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

    if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
      from = convert_to_mode (SImode, from, unsignedp);

    libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
    gcc_assert (libfunc);

    start_sequence ();

    value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                     GET_MODE (to), 1, from,
                                     GET_MODE (from));
    insns = get_insns ();
    end_sequence ();

    emit_libcall_block (insns, target, value,
                        gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
                                       GET_MODE (to), from));
  }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
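
/* The shift-and-round trick above has this rough C equivalent for a
   64-bit source (an illustrative sketch assuming <stdint.h> types; GCC
   emits the same idea as RTL):

     double u64_to_f64 (uint64_t x)
     {
       if ((int64_t) x >= 0)
         return (double) (int64_t) x;   // sign bit clear: convert directly
       // Sign bit set: halve, keeping any shifted-out bit "sticky" so
       // the final rounding stays correct, then double the result.
       uint64_t half = (x >> 1) | (x & 1);
       double d = (double) (int64_t) half;
       return d + d;
     }  */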
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (maybe_emit_unop_insn (icode, target, from,
                                      doing_unsigned ? UNSIGNED_FIX : FIX))
              {
                if (target != to)
                  convert_move (to, target, unsignedp);
                return;
              }
            delete_insns_since (last);
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
          && (!DECIMAL_FLOAT_MODE_P (fmode)
              || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit;
          rtx_code_label *lab1, *lab2;
          rtx_insn *insn;

          bitsize = GET_MODE_PRECISION (GET_MODE (to));
          real_2expN (&offset, bitsize - 1, fmode);
          limit = const_double_from_real_value (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (targetm.gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 (HOST_WIDE_INT_1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_dst_reg_note (insn, REG_EQUAL,
                                gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
                                               copy_rtx (from)),
                                to);
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
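
/* Rough C equivalent of the unsigned fixup above for a 64-bit TO (an
   illustrative sketch assuming <stdint.h> types):

     uint64_t f64_to_u64 (double d)
     {
       if (d < 0x1p63)                  // hot path: value fits in int64_t
         return (uint64_t) (int64_t) d;
       // d is in [2^63, 2^64): subtracting 2^63 is exact (it merely
       // clears the top bit); convert signed, then restore the bit.
       return (uint64_t) (int64_t) (d - 0x1p63) ^ (UINT64_C (1) << 63);
     }  */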
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  machine_mode mode = GET_MODE (arg);
  machine_mode arg_mode;
  if (SCALAR_INT_MODE_P (mode))
    {
      /* If we need to promote the integer function argument we need to do
         it here instead of inside emit_library_call_value because in
         emit_library_call_value we don't know if we should do a signed or
         unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
                                        &unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
        return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
                                   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
                      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        icode = convert_optab_handler (tab, imode, fmode);
        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
              {
                delete_insns_since (last);
                continue;
              }
            if (target != to)
              convert_move (to, target, 0);
            return true;
          }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
          && (optab_handler (code_to_optab (code), mode)
              != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        rtx l = optab_libfunc ((optab) i, (machine_mode) j);
        if (l)
          {
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (optab_to_code ((optab) i)),
                     GET_MODE_NAME (j),
                     XSTR (l, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
                                         (machine_mode) k);
          if (l)
            {
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (optab_to_code ((optab) i)),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (l, 0));
            }
        }
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
                    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
                            tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;

    case UNORDERED_EXPR:
      code = UNORDERED;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
                    tree t_op0, tree t_op1, bool unsignedp,
                    enum insn_code icode, unsigned int opno)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
                         EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
                         EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
   vec_perm operand, assuming the second operand is a constant vector of zeroes.
   Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
   shift.  */

static rtx
shift_amt_for_vec_perm_mask (rtx sel)
{
  unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));

  if (GET_CODE (sel) != CONST_VECTOR)
    return NULL_RTX;

  first = INTVAL (CONST_VECTOR_ELT (sel, 0));
  if (first >= nelt)
    return NULL_RTX;
  for (i = 1; i < nelt; i++)
    {
      int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
      unsigned int expected = i + first;
      /* Indices into the second vector are all equivalent.  */
      if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
        return NULL_RTX;
    }

  return GEN_INT (first * bitsize);
}
/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
                   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
        v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Generate instructions for vec_perm optab given its mode
   and three operands.  */

rtx
expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
        qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      /* See if this can be handled with a vec_shr.  We only do this if the
         second vector is all zeroes.  */
      enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
      enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
                                      ? optab_handler (vec_shr_optab, qimode)
                                      : CODE_FOR_nothing);
      rtx shift_amt = NULL_RTX;
      if (v1 == CONST0_RTX (GET_MODE (v1))
          && (shift_code != CODE_FOR_nothing
              || shift_code_qi != CODE_FOR_nothing))
        {
          shift_amt = shift_amt_for_vec_perm_mask (sel);
          if (shift_amt)
            {
              struct expand_operand ops[3];
              if (shift_code != CODE_FOR_nothing)
                {
                  create_output_operand (&ops[0], target, mode);
                  create_input_operand (&ops[1], v0, mode);
                  create_convert_operand_from_type (&ops[2], shift_amt,
                                                    sizetype);
                  if (maybe_expand_insn (shift_code, 3, ops))
                    return ops[0].value;
                }
              if (shift_code_qi != CODE_FOR_nothing)
                {
                  tmp = gen_reg_rtx (qimode);
                  create_output_operand (&ops[0], tmp, qimode);
                  create_input_operand (&ops[1], gen_lowpart (qimode, v0),
                                        qimode);
                  create_convert_operand_from_type (&ops[2], shift_amt,
                                                    sizetype);
                  if (maybe_expand_insn (shift_code_qi, 3, ops))
                    return gen_lowpart (mode, ops[0].value);
                }
            }
        }

      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
          if (tmp)
            return tmp;
        }

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
        {
          vec = rtvec_alloc (w);
          for (i = 0; i < e; ++i)
            {
              unsigned int j, this_e;

              this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
              this_e &= 2 * e - 1;
              this_e *= u;

              for (j = 0; j < u; ++j)
                RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
            }
          sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

          icode = direct_optab_handler (vec_perm_const_optab, qimode);
          if (icode != CODE_FOR_nothing)
            {
              tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
              tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
                                       gen_lowpart (qimode, v1), sel_qi);
              if (tmp)
                return gen_lowpart (mode, tmp);
            }
        }
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
        return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      machine_mode selmode = GET_MODE (sel);
      if (u == 2)
        sel = expand_simple_binop (selmode, PLUS, sel, sel,
                                   NULL, 0, OPTAB_DIRECT);
      else
        sel = expand_simple_binop (selmode, ASHIFT, sel,
                                   GEN_INT (exact_log2 (u)),
                                   NULL, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte of each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
        {
          int this_e = i / u * u;
          if (BYTES_BIG_ENDIAN)
            this_e += u - 1;
          RTVEC_ELT (vec, i) = GEN_INT (this_e);
        }
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
         so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
        RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
                                    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
                           gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
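
/* Worked example of the constant byte-based lowering: a V2SI element
   selector {1, 0} expands to the V8QI byte selector
   {4, 5, 6, 7, 0, 1, 2, 3} (each 4-byte element index is scaled by the
   element size and expanded into its component byte positions).  */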
/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
                           rtx target)
{
  struct expand_operand ops[4];
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
  enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
  rtx mask, rtx_op1, rtx_op2;

  if (icode == CODE_FOR_nothing)
    return 0;

  mask = expand_normal (op0);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  mask = force_reg (mask_mode, mask);
  rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_input_operand (&ops[3], mask, mask_mode);
  expand_insn (icode, 4, ops);

  return ops[0].value;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
                      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
      if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
          != CODE_FOR_nothing)
        return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
                                          op2, target);
      /* Fake op0 < 0.  */
      else
        {
          gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
                      == MODE_VECTOR_INT);
          op0a = op0;
          op0b = build_zero_cst (TREE_TYPE (op0));
          tcode = LT_EXPR;
        }
    }
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
              && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
        icode = get_vcond_eq_icode (mode, cmp_op_mode);
      if (icode == CODE_FOR_nothing)
        return 0;
    }

  comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
                                   icode, 4);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  struct expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
        icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
        return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
                                   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
                      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  machine_mode wmode;
  rtx m1, m2, perm;
  optab tab1, tab2;
  rtvec v;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
                           OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
        std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
        RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
                                    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
        RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
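
/* Per-lane meaning of the expansion above (illustrative): with 32-bit
   lanes each result element is the high half of a widening multiply,

     hi = (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);   // uns_p case

   computed via widened even/odd (or lo/hi) products, with the final
   vec_perm gathering the high halves back into lane order.  */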
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
        if (!success)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
                                       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
                                       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
                           GET_MODE (success), 1, label,
                           profile_probability::guessed_never ());
  return true;
}
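
/* For intuition, a caller expanding an atomic fetch-and-OP through this
   helper ends up with code shaped like the following C (a sketch, not a
   quote from the expanders):

     old = *mem;                        // plain load, done only once
     do
       new = OP (old);                  // the SEQ passed in
     while (!CAS (mem, &old, new));     // a failed CAS refreshes "old"
*/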
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
        return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
                                   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
        {
          rtx addr;

          addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                          mode, 2, addr, ptr_mode,
                                          val, mode);
        }
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation, which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
                                           MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
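
/* The operation expanded above has these C-level semantics (an
   illustrative restatement of the documented __sync_lock_test_and_set
   behavior):

     type __sync_lock_test_and_set (type *mem, type val)
     {
       type old = *mem;         // acquire ordering only
       *mem = val;              // some targets support only val == 1
       return old;
     }  */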
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
        emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
6100 /* This function expands the atomic compare exchange operation:
6102 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6103 *PTARGET_OVAL is an optional place to store the old value from memory.
6104 Both target parameters may be NULL or const0_rtx to indicate that we do
6105 not care about that return value. Both target parameters are updated on
6106 success to the actual location of the corresponding result.
6108 MEMMODEL is the memory model variant to use.
6110 The return value of the function is true for success. */
6113 expand_atomic_compare_and_swap (rtx
*ptarget_bool
, rtx
*ptarget_oval
,
6114 rtx mem
, rtx expected
, rtx desired
,
6115 bool is_weak
, enum memmodel succ_model
,
6116 enum memmodel fail_model
)
6118 machine_mode mode
= GET_MODE (mem
);
6119 struct expand_operand ops
[8];
6120 enum insn_code icode
;
6121 rtx target_oval
, target_bool
= NULL_RTX
;
6124 /* If loads are not atomic for the required size and we are not called to
6125 provide a __sync builtin, do not do anything so that we stay consistent
6126 with atomic loads of the same size. */
6127 if (!can_atomic_load_p (mode
) && !is_mm_sync (succ_model
))
6130 /* Load expected into a register for the compare and swap. */
6131 if (MEM_P (expected
))
6132 expected
= copy_to_reg (expected
);
6134 /* Make sure we always have some place to put the return oldval.
6135 Further, make sure that place is distinct from the input expected,
6136 just in case we need that path down below. */
6137 if (ptarget_oval
&& *ptarget_oval
== const0_rtx
)
6138 ptarget_oval
= NULL
;
6140 if (ptarget_oval
== NULL
6141 || (target_oval
= *ptarget_oval
) == NULL
6142 || reg_overlap_mentioned_p (expected
, target_oval
))
6143 target_oval
= gen_reg_rtx (mode
);
6145 icode
= direct_optab_handler (atomic_compare_and_swap_optab
, mode
);
6146 if (icode
!= CODE_FOR_nothing
)
6148 machine_mode bool_mode
= insn_data
[icode
].operand
[0].mode
;
6150 if (ptarget_bool
&& *ptarget_bool
== const0_rtx
)
6151 ptarget_bool
= NULL
;
6153 /* Make sure we always have a place for the bool operand. */
6154 if (ptarget_bool
== NULL
6155 || (target_bool
= *ptarget_bool
) == NULL
6156 || GET_MODE (target_bool
) != bool_mode
)
6157 target_bool
= gen_reg_rtx (bool_mode
);
6159 /* Emit the compare_and_swap. */
6160 create_output_operand (&ops
[0], target_bool
, bool_mode
);
6161 create_output_operand (&ops
[1], target_oval
, mode
);
6162 create_fixed_operand (&ops
[2], mem
);
6163 create_input_operand (&ops
[3], expected
, mode
);
6164 create_input_operand (&ops
[4], desired
, mode
);
6165 create_integer_operand (&ops
[5], is_weak
);
6166 create_integer_operand (&ops
[6], succ_model
);
6167 create_integer_operand (&ops
[7], fail_model
);
6168 if (maybe_expand_insn (icode
, 8, ops
))
6170 /* Return success/failure. */
6171 target_bool
= ops
[0].value
;
6172 target_oval
= ops
[1].value
;
6177 /* Otherwise fall back to the original __sync_val_compare_and_swap
6178 which is always seq-cst. */
6179 icode
= optab_handler (sync_compare_and_swap_optab
, mode
);
6180 if (icode
!= CODE_FOR_nothing
)
6184 create_output_operand (&ops
[0], target_oval
, mode
);
6185 create_fixed_operand (&ops
[1], mem
);
6186 create_input_operand (&ops
[2], expected
, mode
);
6187 create_input_operand (&ops
[3], desired
, mode
);
6188 if (!maybe_expand_insn (icode
, 4, ops
))
6191 target_oval
= ops
[0].value
;
6193 /* If the caller isn't interested in the boolean return value,
6194 skip the computation of it. */
6195 if (ptarget_bool
== NULL
)
6198 /* Otherwise, work out if the compare-and-swap succeeded. */
6200 if (have_insn_for (COMPARE
, CCmode
))
6201 note_stores (PATTERN (get_last_insn ()), find_cc_set
, &cc_reg
);
6204 target_bool
= emit_store_flag_force (target_bool
, EQ
, cc_reg
,
6205 const0_rtx
, VOIDmode
, 0, 1);
6208 goto success_bool_from_val
;

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, 3, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
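
/* For orientation, a sketch of the user-level code that reaches this
   expander (an illustration added for exposition, not part of GCC):
   PTARGET_BOOL maps to the function result of the C11-style builtin and
   PTARGET_OVAL to the value it writes back through its EXPECTED pointer,
   which is why target_oval above must not overlap the input.

     bool
     cas_example (int *p, int *expected, int desired)
     {
       return __atomic_compare_exchange_n (p, expected, desired,
                                           false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
     }
*/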

/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}

/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_asm_memory_barrier ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
  else
    expand_asm_memory_barrier ();
}

/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  if (targetm.have_mem_signal_fence ())
    emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
         handler running on the same thread.  Thus this really becomes a
         compiler barrier, in that stores must not be sunk past
         (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
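
/* For exposition (an added illustration, not part of GCC): a signal fence
   only has to constrain the compiler, not the hardware, so

     __atomic_signal_fence (__ATOMIC_SEQ_CST);

   typically expands to nothing more than the empty volatile asm emitted
   by expand_asm_memory_barrier above.  */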

/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     doing so could result in would be incorrect if this is a volatile
     atomic load or targets read-only-mapped memory.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
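
/* Illustrative sketch (added for exposition, not part of GCC): on a target
   with no atomic_load pattern, a word-sized seq-cst load such as

     int v = __atomic_load_n (p, __ATOMIC_SEQ_CST);

   comes out of the fallback path above as fence; plain load; fence, which
   is correct only because aligned word-sized loads are assumed atomic.  */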

/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release && val == const0_rtx && is_mm_release (model))
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
         we can try an atomic_exchange and throw away the result.  Otherwise,
         don't do anything so that we do not create an inconsistency between
         loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
        {
          rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
          if (!target)
            target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX,
                                                                mem, val);
          if (target)
            return const0_rtx;
        }
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
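
/* For exposition (an added illustration, not part of GCC): with no
   atomic_store pattern, a word-sized release store such as

     __atomic_store_n (p, 0, __ATOMIC_RELEASE);

   becomes fence; plain store from the tail of the function above, while
   a seq-cst store gets a second, trailing fence.  */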

/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};

/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
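
/* For orientation (an added illustration, not part of GCC): for

     old = __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);

   CODE is PLUS, so maybe_emit_op below will try atomic_fetch_add_optab
   (mem_fetch_before) and fall back to sync_old_add_optab; because
   reverse_code is MINUS, the result can also be recovered from an
   add_fetch pattern by subtracting V again.  */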

/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
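
/* For exposition (an added illustration, not part of GCC): both rewrites
   above rely on the new value being independent of the old one, e.g.

     old = __atomic_fetch_and (p, 0, m);   // same as old = exchange (p, 0, m)
     old = __atomic_fetch_or (p, -1, m);   // same as old = exchange (p, -1, m)

   since x & 0 == 0 and x | -1 == -1 for every previous value x.  */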

/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
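
/* For orientation (an added illustration, not part of GCC): with a used
   result and USE_MEMMODEL set, the operands come out as

     ops[0] = output (TARGET), ops[1] = MEM, ops[2] = VAL, ops[3] = MODEL

   (num_ops == 4); the no-result __sync form needs only MEM and VAL
   (num_ops == 2).  op_counter simply skips slot 0 when no output
   operand was created.  */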

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   Atomically fetch MEM, perform the operation with VAL and store the result
   back to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
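
/* For exposition (an added illustration, not part of GCC): the compensation
   code derives one fetch variant from the other.  If only fetch_add exists,

     new = __atomic_add_fetch (p, v, m);

   is emitted as old = fetch_add (p, v, m); followed by new = old + v.
   Going the other way uses reverse_code, e.g. old = add_fetch (p, v, m) - v.
   NAND is the odd one out: the after value is ~(old & v), the AND followed
   by NOT above, and it has no reverse code.  */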

/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   Atomically fetch MEM, perform the operation with VAL and store the result
   back to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          rtx_insn *insn = get_insns ();
          end_sequence ();
          emit_insn (insn);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            2, addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
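
/* For exposition (an added illustration, not part of GCC): the final
   fallback realizes the fetch-op as the classic CAS loop, roughly

     old = *mem;
     do
       new = old OP val;
     while (!compare_and_swap (mem, &old, new));

   with t0/t1 playing the roles of old/new and the loop itself emitted
   by expand_compare_and_swap_loop.  */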

/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}

/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}

/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;
      rtx_insn *last;
      machine_mode mode;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}

/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}

/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}

/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}

/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}

/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}

/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}

/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}