/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0),
			   copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
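/* Editor's illustrative sketch, not part of the original file: why
   NO_EXTEND is safe for logical operations but not for right shifts.
   The low bits of a 16-bit AND computed in 32 bits are correct no
   matter what garbage the high bits hold, whereas a right shift would
   pull those high bits down into the result.  Plain C stand-in for
   the RTL expansion above; the function name is hypothetical.  */
#include <stdint.h>

static uint16_t
and16_via_32 (uint16_t a, uint16_t b, uint32_t junk)
{
  /* Widen with arbitrary garbage in the high halves...  */
  uint32_t wa = (junk << 16) | a;
  uint32_t wb = (~junk << 16) | b;
  /* ...the truncated AND is still exact.  This would NOT hold if the
     32-bit operation were, say, wa >> 3.  */
  return (uint16_t) (wa & wb);
}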
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (wide_op)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
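/* Editor's illustrative sketch, not part of the original file: the
   effect of expand_vector_broadcast in plain C -- every lane of the
   vector receives the one scalar operand, as used later when a
   vector-by-scalar shift is rewritten as a vector-by-vector shift.
   Function name is hypothetical.  */
static void
broadcast_lanes (unsigned int *lanes, int nunits, unsigned int op)
{
  for (int i = 0; i < nunits; ++i)
    lanes[i] = op;
}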
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
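/* Editor's illustrative sketch, not part of the original file: the
   superword case for a 64-bit arithmetic right shift built from
   32-bit word operations, for an effective count 32 <= N < 64
   (assumes a 32-bit word and, as GCC targets do, arithmetic behavior
   for >> on signed values).  The INTO word takes the OUTOF input
   shifted by N - 32; the OUTOF word is filled with sign copies, or
   with zeros for lshr/ashl.  Uses <stdint.h> from the sketch above;
   the function name is hypothetical.  */
static void
dw_ashr_superword (uint32_t hi_in, uint32_t *hi_out, uint32_t *lo_out,
		   unsigned n /* 32 <= n < 64 */)
{
  *lo_out = (uint32_t) ((int32_t) hi_in >> (n - 32));
  *hi_out = (uint32_t) ((int32_t) hi_in >> 31);	/* sign fill */
}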
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
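/* Editor's illustrative sketch, not part of the original file: the
   subword case for a 64-bit left shift from 32-bit pieces, 0 < N < 32.
   For a left shift, OUTOF is the low word (bits leave it) and INTO is
   the high word.  The carried-over bits are OUTOF_INPUT shifted the
   reverse way by 32 - N, done as one shift by 1 then a shift by the
   remainder, exactly as the routine above does to avoid an
   out-of-range count of 32; when counts are truncated to 5 bits this
   remainder can equivalently be ~N.  Uses <stdint.h> from the first
   sketch; the function name is hypothetical.  */
static void
dw_shl_subword (uint32_t hi_in, uint32_t lo_in,
		uint32_t *hi_out, uint32_t *lo_out,
		unsigned n /* 0 < n < 32 */)
{
  /* (lo >> 1) >> (31 - n) == lo >> (32 - n), with both counts in range.  */
  uint32_t carries = (lo_in >> 1) >> (31 - n);

  /* INTO word: shifted INTO_INPUT OR'ed with the carried-over bits.  */
  *hi_out = (hi_in << n) | carries;
  /* OUTOF word: a standard single-word shift.  */
  *lo_out = lo_in << n;
}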
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;

      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;

      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
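/* Editor's illustrative sketch, not part of the original file: the
   driver logic above in plain C.  Pick the subword or superword
   routine by comparing the count against the word size -- the runtime
   analogue of the CMP_CODE/CMP1/CMP2 test when the count is not a
   compile-time constant.  Reuses the hypothetical dw_shl_subword from
   the earlier sketch.  */
static void
dw_shl (uint32_t hi_in, uint32_t lo_in,
	uint32_t *hi_out, uint32_t *lo_out, unsigned n /* 0 < n < 64 */)
{
  if (n < 32)
    dw_shl_subword (hi_in, lo_in, hi_out, lo_out, n);
  else
    {
      /* Superword case for a left shift: the low word becomes zero and
	 the high word takes lo_in << (n - 32).  */
      *hi_out = lo_in << (n - 32);
      *lo_out = 0;
    }
}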
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			 _______________________
			[__op0_high_|__op0_low__]
			 _______________________
	*		[__op1_high_|__op1_low__]
	_______________________________________________
			 _______________________
    (1)			[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	  _______________________
    (3)	 [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
		       bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
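/* Editor's illustrative sketch, not part of the original file: the
   three-multiply scheme above in plain C, forming a 64-bit product
   from 32-bit words, including the sign-adjustment trick used when
   only a *signed* widening multiply is available.  `umulp' mirrors
   the parameter of the same name; the function name is hypothetical.
   Uses <stdint.h> from the first sketch.  */
static uint64_t
dw_mult (uint32_t op0_hi, uint32_t op0_lo, uint32_t op1_hi, uint32_t op1_lo,
	 int umulp)
{
  /* (2a) and (2b): non-widening multiplies feeding the high word.  */
  uint32_t adjust = op0_hi * op1_lo + op1_hi * op0_lo;
  uint64_t product;

  if (umulp)
    /* (1): unsigned widening multiply of the low words.  */
    product = (uint64_t) op0_lo * op1_lo;
  else
    {
      /* Only a signed widening multiply: each low word's sign bit
	 (0 or 1) times the *other* low word, added into the high-word
	 adjustment, cancels the 2**32-weighted error the signed
	 multiply introduces.  */
      adjust += (op0_lo >> 31) * op1_lo + (op1_lo >> 31) * op0_lo;
      product = (uint64_t) ((int64_t) (int32_t) op0_lo * (int32_t) op1_lo);
    }
  /* (3) and the upper words of (2a)/(2b) never need computing.  */
  return product + ((uint64_t) adjust << 32);
}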
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
						      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
				      widened_mode (mode, op0, op1), 1)
	    != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
				    unsignedp, methods, last);
      if (temp)
	return temp;
    }
  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
	newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
					     : smul_widen_optab),
				  GET_MODE_2XWIDER_MODE (mode), mode)
	  != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  machine_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (inner_mode)
		  < GET_MODE_BITSIZE (GET_MODE (op1))))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 GET_MODE_WIDER_MODE (wider_mode),
						 mode, 0)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target. Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	    }
	  else
	    {
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);
	    }

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx_insn *temp = emit_move_insn (target, xtarget);

	      set_dst_reg_note (temp, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						mode, copy_rtx (xop0),
						copy_rtx (xop1)),
				target);
	    }
	  else
	    target = xtarget;

	  return target;
	}

      else
	delete_insns_since (last);
    }
  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && widening_optab_handler (smul_widen_optab, mode, word_mode)
	     != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
	    {
	      rtx_insn *move = emit_move_insn (target ? target : product,
					       product);
	      set_dst_reg_note (move,
				REG_EQUAL,
				gen_rtx_fmt_ee (MULT, mode,
						copy_rtx (op0),
						copy_rtx (op1)),
				target ? target : product);
	    }
	  return product;
	}
    }
  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
	      != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
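/* Editor's illustrative sketch, not part of the original file: the
   word-at-a-time carry-propagating addition that expand_binop
   synthesizes above, in plain C with a 32-bit word.  The carry out of
   each word is computed with an unsigned comparison, standing in for
   emit_store_flag_force, and the two possible carries from adding the
   carry-in are IOR'ed together, as in the loop above.  Words are
   stored least-significant first; the function name is hypothetical.
   Uses <stdint.h> from the first sketch.  */
static void
multiword_add (uint32_t *dst, const uint32_t *a, const uint32_t *b,
	       int nwords)
{
  uint32_t carry_in = 0;
  for (int i = 0; i < nwords; i++)
    {
      uint32_t x = a[i] + b[i];
      uint32_t carry_out = x < a[i];	/* carry from the main add */
      uint32_t newx = x + carry_in;
      carry_out |= newx < x;		/* carry from adding carry-in */
      dst[i] = newx;
      carry_in = carry_out;
    }
}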
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
        return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}
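
/* Usage sketch (editorial addition; A and B are hypothetical SImode
   operands): the canonical client is division-with-remainder via a
   divmod pattern, which produces both results at once:

     rtx quo = gen_reg_rtx (SImode);
     rtx rem = gen_reg_rtx (SImode);
     if (expand_twoval_binop (sdivmod_optab, a, b, quo, rem, 0))
       ... use QUO and REM ...

   Passing 0 for one of the targets discards that half of the result.  */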
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */

static rtx
widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode,
                                    unoptab != clrsb_optab, false);
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unoptab != clrsb_optab);
              if (temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */

static rtx
expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
                       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
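
/* The sequence built above corresponds to this C-level sketch for a
   double-word value on a 32-bit target (editorial illustration):

     if (hi != 0)
       result = clz32 (hi);
     else
       result = clz32 (lo) + 32;

   Both assignments write the same word_mode scratch, so the final move
   into TARGET is a single insn on which the REG_EQUAL note can hang.  */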
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */

static rtx
expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 0, mode), NULL_RTX,
                           true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
                           operand_subword_force (op0, 1, mode), NULL_RTX,
                           true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, 0);
  emit_insn (seq);
  return t;
}
/* Try calculating
        (parity:wide x)
   as
        (parity:narrow (low (x) ^ high (x))).  */

static rtx
expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
                        operand_subword_force (op0, 0, mode),
                        operand_subword_force (op0, 1, mode),
                        NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
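
/* Editorial note on why the XOR folding is valid: parity is the XOR of
   all bits, and XOR is associative and commutative, so in C terms

     parity64 (x) == parity32 ((uint32_t) x ^ (uint32_t) (x >> 32));

   each bit of the double word feeds exactly one bit of the folded
   word.  */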
/* Try calculating
        (bswap:narrow x)
   as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */

static rtx
widen_bswap (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx x;
  rtx_insn *last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);
  if (REG_P (target))
    emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
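
/* Editorial sketch: byte-reversing a double word is byte-reversing each
   word and exchanging the words; for a 64-bit X on a 32-bit target,

     lo' = bswap32 (hi);  hi' = bswap32 (lo);

   which is why T1, computed from word 0 of OP, is stored into word 1 of
   TARGET above, and T0 into word 0.  */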
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */

static rtx
expand_parity (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0 || GET_MODE (target) != wider_mode)
                target = gen_reg_rtx (wider_mode);

              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (mclass != MODE_INT
                      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                    return convert_to_mode (mode, temp, 0);
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION (mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (machine_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
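
/* C-level sketch of the identity used above (editorial): with W the
   precision of MODE,

     ctz (x) == (W - 1) - clz (x & -x)

   because x & -x isolates the lowest set bit.  For example, x = 12
   (0b1100) and W = 32: x & -x = 4, clz (4) = 29, and 31 - 29 = 2,
   which is indeed ctz (12).  */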
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */

static rtx
expand_ffs (machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
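
/* C-level sketch of the whole expansion (editorial): ffs must map 0 to 0
   and any other x to ctz (x) + 1, so the code above computes

     t = ctz (x);         (or (W-1) - clz (x & -x))
     if (x == 0) t = -1;  (skipped when ctz is known to yield -1 at zero)
     return t + 1;

   The test-and-branch is only emitted when the hardware's value at zero
   is unknown or inconvenient.  */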
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
                           machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_wide_int_const (mask, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_wide_int_const (mask, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
                        target);
    }

  return target;
}
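
/* Editorial sketch of the bit trick for IEEE single precision (SFmode
   viewed as SImode, sign bit 31):

     abs (x):  x & 0x7fffffff   (AND with ~(1 << 31))
     neg (x):  x ^ 0x80000000   (XOR with  (1 << 31))

   matching the and_optab/xor_optab selection and the mask inversion for
   ABS above.  */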
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */

static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
                    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, ops[0].value,
                                   optab_to_code (unoptab),
                                   ops[1].value, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return ops[0].value;
        }
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_clz (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (mode, op0, target);
      if (temp)
        return temp;
    }

  if (unoptab == parity_optab
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
          || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
         or ROTATERT.  First try these directly; if this fails, then try the
         obvious pair of shifts with allowed widening, as this will probably
         be always more efficient than the other fallback methods.  */
      if (mode == HImode)
        {
          rtx_insn *last;
          rtx temp1, temp2;

          if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          last = get_last_insn ();

          temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          if (temp1 && temp2)
            {
              temp = expand_binop (mode, ior_optab, temp1, temp2, target,
                                   unsignedp, OPTAB_WIDEN);
              if (temp)
                return temp;
            }

          delete_insns_since (last);
        }

      temp = widen_bswap (mode, op0, target);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_bswap (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
          {
            rtx xop0 = op0;
            rtx_insn *last = get_last_insn ();

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */
            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && mclass == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
        return temp;
    }
 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == clrsb_optab || unoptab == popcount_optab
          || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
                                          optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
        eq_value = NULL_RTX;
      else
        {
          eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
          if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
          else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (ZERO_EXTEND,
                                           outmode, eq_value, mode);
        }
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
              || optab_libfunc (unoptab, wider_mode))
            {
              rtx xop0 = op0;
              rtx_insn *last = get_last_insn ();

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab
                                     || unoptab == bswap_optab)
                                    && mclass == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  Similarly for clrsb.  */
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
                  && temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);

              /* Likewise for bswap.  */
              if (unoptab == bswap_optab && temp != 0)
                {
                  gcc_assert (GET_MODE_PRECISION (wider_mode)
                              == GET_MODE_BITSIZE (wider_mode)
                              && GET_MODE_PRECISION (mode)
                                 == GET_MODE_BITSIZE (mode));

                  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
                                       GET_MODE_BITSIZE (wider_mode)
                                       - GET_MODE_BITSIZE (mode),
                                       NULL_RTX, true);
                }

              if (temp)
                {
                  if (mclass != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
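
/* Usage sketch (editorial addition; X is a hypothetical SImode pseudo):

     rtx t = expand_unop (SImode, neg_optab, x, target, 0);

   may emit a negation insn, flip a float sign bit, subtract from zero,
   widen, or fall back to a libcall; callers must use the returned rtx,
   which is not necessarily TARGET.  */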
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                          op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL, op1, -1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          imode = int_mode_for_mode (mode);
          if (imode == BLKmode)
            return NULL_RTX;
          op1 = gen_lowpart (imode, op1);
        }
      else
        {
          int word;

          imode = word_mode;
          if (FLOAT_WORDS_BIG_ENDIAN)
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
          else
            word = bitpos / BITS_PER_WORD;
          bitpos = bitpos % BITS_PER_WORD;
          op1 = operand_subword_force (op1, word, mode);
        }

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
                           immed_wide_int_const (mask, imode),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece
                  = expand_binop (imode, and_optab, op0_piece,
                                  immed_wide_int_const (~mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_wide_int_const (mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_wide_int_const (mask, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_wide_int_const (~mask, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
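
/* Editorial sketch for IEEE single precision (mask = 1 << 31):

     result = (op0 & ~mask) | (op1 & mask);

   i.e. the magnitude bits of OP0 merged with the sign bit of OP1.  The
   op0 & ~mask step is skipped when OP0_IS_ABS says the sign bit of OP0
   is already clear.  */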
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
                      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */

static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list, or if it sets a result (presumably a hard
              register) that is set or clobbered by a previous insn.
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
              SET_DEST perform the former check on the address, and the latter
              check on the MEM.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_in_p (SET_DEST (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
                      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
            if (note)
              {
                int lp_nr = INTVAL (XEXP (note, 0));
                if (lp_nr == 0 || lp_nr == INT_MIN)
                  remove_note (insn, note);
              }
          }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
         reg note to indicate that this call cannot throw or execute a nonlocal
         goto (unless there is already a REG_EH_REGION note, in which case
         we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                SET_PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
               enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        return 1;
      if (purpose == ccp_store_flag
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 1, test))
        return 1;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
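
/* Usage sketch (editorial addition): before emitting a conditional
   branch on an unordered float comparison, a caller can ask

     if (can_compare_p (UNORDERED, DFmode, ccp_jump))
       ... emit the cbranch directly ...

   and otherwise fall back to an ordered comparison or a library call.  */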
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
                  int unsignedp, enum optab_methods methods,
                  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
              || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((CONST_INT_P (size)
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
          *pmode = result_mode;
          return;
        }

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
        goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
                  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        {
          rtx_insn *last = get_last_insn ();
          rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
          rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
          if (op0 && op1
              && insn_operand_matches (icode, 1, op0)
              && insn_operand_matches (icode, 2, op1))
            {
              XEXP (test, 0) = op0;
              XEXP (test, 1) = op1;
              *ptest = test;
              *pmode = cmp_mode;
              return;
            }
          delete_insns_since (last);
        }

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
        break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp)
        {
          rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
          if (ulibfunc)
            libfunc = ulibfunc;
        }

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                        ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent LTU.
         The comparisons in the fixed-point helper library are always
         biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
        {
          if (unsignedp)
            x = plus_constant (ret_mode, result, 1);
          else
            y = const0_rtx;
        }

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
                        ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
                 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
        return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
        return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
                                          XEXP (test, 1), label));
  if (prob != -1
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_int_reg_note (insn, REG_BR_PROB, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, rtx label,
                         int prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      std::swap (op0, op1);
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
                    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
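
/* Usage sketch (editorial addition; X is a hypothetical SImode pseudo):

     rtx_code_label *lab = gen_label_rtx ();
     emit_cmp_and_jump_insns (x, const0_rtx, LT, NULL_RTX,
                              SImode, 0, lab, -1);

   emits a signed "x < 0" compare-and-branch to LAB, widening the
   comparison or emitting a library call if SImode has no cbranch
   pattern; PROB of -1 means no branch-probability note is attached.  */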
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
                       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if (code_to_optab (comparison)
          && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
        break;

      if (code_to_optab (swapped)
          && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
        {
          std::swap (x, y);
          comparison = swapped;
          break;
        }

      if (code_to_optab (reversed)
          && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
4189 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4192 emit_indirect_jump (rtx loc
)
4194 if (!targetm
.have_indirect_jump ())
4195 sorry ("indirect jumps are not available on this target");
4198 struct expand_operand ops
[1];
4199 create_address_operand (&ops
[0], loc
);
4200 expand_jump_insn (targetm
.code_for_indirect_jump
, 1, ops
);
4206 /* Emit a conditional move instruction if the machine supports one for that
4207 condition and machine mode.
4209 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4210 the mode to use should they be constants. If it is VOIDmode, they cannot
4213 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4214 should be stored there. MODE is the mode to use should they be constants.
4215 If it is VOIDmode, they cannot both be constants.
4217 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4218 is not supported. */
4221 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4222 machine_mode cmode
, rtx op2
, rtx op3
,
4223 machine_mode mode
, int unsignedp
)
4227 enum insn_code icode
;
4228 enum rtx_code reversed
;
4230 /* If the two source operands are identical, that's just a move. */
4232 if (rtx_equal_p (op2
, op3
))
4235 target
= gen_reg_rtx (mode
);
4237 emit_move_insn (target
, op3
);
4241 /* If one operand is constant, make it the second one. Only do this
4242 if the other operand is not constant as well. */
4244 if (swap_commutative_operands_p (op0
, op1
))
4246 std::swap (op0
, op1
);
4247 code
= swap_condition (code
);
4250 /* get_condition will prefer to generate LT and GT even if the old
4251 comparison was against zero, so undo that canonicalization here since
4252 comparisons against zero are cheaper. */
4253 if (code
== LT
&& op1
== const1_rtx
)
4254 code
= LE
, op1
= const0_rtx
;
4255 else if (code
== GT
&& op1
== constm1_rtx
)
4256 code
= GE
, op1
= const0_rtx
;
4258 if (cmode
== VOIDmode
)
4259 cmode
= GET_MODE (op0
);
4261 if (swap_commutative_operands_p (op2
, op3
)
4262 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4265 std::swap (op2
, op3
);
4269 if (mode
== VOIDmode
)
4270 mode
= GET_MODE (op2
);
4272 icode
= direct_optab_handler (movcc_optab
, mode
);
4274 if (icode
== CODE_FOR_nothing
)
4278 target
= gen_reg_rtx (mode
);
4280 code
= unsignedp
? unsigned_condition (code
) : code
;
4281 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4283 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4284 return NULL and let the caller figure out how best to deal with this
4286 if (!COMPARISON_P (comparison
))
4289 saved_pending_stack_adjust save
;
4290 save_pending_stack_adjust (&save
);
4291 last
= get_last_insn ();
4292 do_pending_stack_adjust ();
4293 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4294 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4295 &comparison
, &cmode
);
4298 struct expand_operand ops
[4];
4300 create_output_operand (&ops
[0], target
, mode
);
4301 create_fixed_operand (&ops
[1], comparison
);
4302 create_input_operand (&ops
[2], op2
, mode
);
4303 create_input_operand (&ops
[3], op3
, mode
);
4304 if (maybe_expand_insn (icode
, 4, ops
))
4306 if (ops
[0].value
!= target
)
4307 convert_move (target
, ops
[0].value
, false);
4311 delete_insns_since (last
);
4312 restore_pending_stack_adjust (&save
);
4317 /* Emit a conditional negate or bitwise complement using the
4318 negcc or notcc optabs if available. Return NULL_RTX if such operations
4319 are not available. Otherwise return the RTX holding the result.
4320 TARGET is the desired destination of the result. COMP is the comparison
4321 on which to negate. If COND is true move into TARGET the negation
4322 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4323 CODE is either NEG or NOT. MODE is the machine mode in which the
4324 operation is performed. */
4327 emit_conditional_neg_or_complement (rtx target
, rtx_code code
,
4328 machine_mode mode
, rtx cond
, rtx op1
,
4331 optab op
= unknown_optab
;
4334 else if (code
== NOT
)
4339 insn_code icode
= direct_optab_handler (op
, mode
);
4341 if (icode
== CODE_FOR_nothing
)
4345 target
= gen_reg_rtx (mode
);
4347 rtx_insn
*last
= get_last_insn ();
4348 struct expand_operand ops
[4];
4350 create_output_operand (&ops
[0], target
, mode
);
4351 create_fixed_operand (&ops
[1], cond
);
4352 create_input_operand (&ops
[2], op1
, mode
);
4353 create_input_operand (&ops
[3], op2
, mode
);
4355 if (maybe_expand_insn (icode
, 4, ops
))
4357 if (ops
[0].value
!= target
)
4358 convert_move (target
, ops
[0].value
, false);
4362 delete_insns_since (last
);
4366 /* Emit a conditional addition instruction if the machine supports one for that
4367 condition and machine mode.
4369 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4370 the mode to use should they be constants. If it is VOIDmode, they cannot
4373 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4374 should be stored there. MODE is the mode to use should they be constants.
4375 If it is VOIDmode, they cannot both be constants.
4377 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4378 is not supported. */
4381 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4382 machine_mode cmode
, rtx op2
, rtx op3
,
4383 machine_mode mode
, int unsignedp
)
4387 enum insn_code icode
;
4389 /* If one operand is constant, make it the second one. Only do this
4390 if the other operand is not constant as well. */
4392 if (swap_commutative_operands_p (op0
, op1
))
4394 std::swap (op0
, op1
);
4395 code
= swap_condition (code
);
4398 /* get_condition will prefer to generate LT and GT even if the old
4399 comparison was against zero, so undo that canonicalization here since
4400 comparisons against zero are cheaper. */
4401 if (code
== LT
&& op1
== const1_rtx
)
4402 code
= LE
, op1
= const0_rtx
;
4403 else if (code
== GT
&& op1
== constm1_rtx
)
4404 code
= GE
, op1
= const0_rtx
;
4406 if (cmode
== VOIDmode
)
4407 cmode
= GET_MODE (op0
);
4409 if (mode
== VOIDmode
)
4410 mode
= GET_MODE (op2
);
4412 icode
= optab_handler (addcc_optab
, mode
);
4414 if (icode
== CODE_FOR_nothing
)
4418 target
= gen_reg_rtx (mode
);
4420 code
= unsignedp
? unsigned_condition (code
) : code
;
4421 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4423 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4424 return NULL and let the caller figure out how best to deal with this
4426 if (!COMPARISON_P (comparison
))
4429 do_pending_stack_adjust ();
4430 last
= get_last_insn ();
4431 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4432 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4433 &comparison
, &cmode
);
4436 struct expand_operand ops
[4];
4438 create_output_operand (&ops
[0], target
, mode
);
4439 create_fixed_operand (&ops
[1], comparison
);
4440 create_input_operand (&ops
[2], op2
, mode
);
4441 create_input_operand (&ops
[3], op3
, mode
);
4442 if (maybe_expand_insn (icode
, 4, ops
))
4444 if (ops
[0].value
!= target
)
4445 convert_move (target
, ops
[0].value
, false);
4449 delete_insns_since (last
);
4453 /* These functions attempt to generate an insn body, rather than
4454 emitting the insn, but if the gen function already emits them, we
4455 make no attempt to turn them back into naked patterns. */
4457 /* Generate and return an insn body to add Y to X. */
4460 gen_add2_insn (rtx x
, rtx y
)
4462 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (x
));
4464 gcc_assert (insn_operand_matches (icode
, 0, x
));
4465 gcc_assert (insn_operand_matches (icode
, 1, x
));
4466 gcc_assert (insn_operand_matches (icode
, 2, y
));
4468 return GEN_FCN (icode
) (x
, x
, y
);
4471 /* Generate and return an insn body to add r1 and c,
4472 storing the result in r0. */
4475 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4477 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (r0
));
4479 if (icode
== CODE_FOR_nothing
4480 || !insn_operand_matches (icode
, 0, r0
)
4481 || !insn_operand_matches (icode
, 1, r1
)
4482 || !insn_operand_matches (icode
, 2, c
))
4485 return GEN_FCN (icode
) (r0
, r1
, c
);
4489 have_add2_insn (rtx x
, rtx y
)
4491 enum insn_code icode
;
4493 gcc_assert (GET_MODE (x
) != VOIDmode
);
4495 icode
= optab_handler (add_optab
, GET_MODE (x
));
4497 if (icode
== CODE_FOR_nothing
)
4500 if (!insn_operand_matches (icode
, 0, x
)
4501 || !insn_operand_matches (icode
, 1, x
)
4502 || !insn_operand_matches (icode
, 2, y
))
4508 /* Generate and return an insn body to add Y to X. */
4511 gen_addptr3_insn (rtx x
, rtx y
, rtx z
)
4513 enum insn_code icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
4515 gcc_assert (insn_operand_matches (icode
, 0, x
));
4516 gcc_assert (insn_operand_matches (icode
, 1, y
));
4517 gcc_assert (insn_operand_matches (icode
, 2, z
));
4519 return GEN_FCN (icode
) (x
, y
, z
);
4522 /* Return true if the target implements an addptr pattern and X, Y,
4523 and Z are valid for the pattern predicates. */
4526 have_addptr3_insn (rtx x
, rtx y
, rtx z
)
4528 enum insn_code icode
;
4530 gcc_assert (GET_MODE (x
) != VOIDmode
);
4532 icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
4534 if (icode
== CODE_FOR_nothing
)
4537 if (!insn_operand_matches (icode
, 0, x
)
4538 || !insn_operand_matches (icode
, 1, y
)
4539 || !insn_operand_matches (icode
, 2, z
))
4545 /* Generate and return an insn body to subtract Y from X. */
4548 gen_sub2_insn (rtx x
, rtx y
)
4550 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (x
));
4552 gcc_assert (insn_operand_matches (icode
, 0, x
));
4553 gcc_assert (insn_operand_matches (icode
, 1, x
));
4554 gcc_assert (insn_operand_matches (icode
, 2, y
));
4556 return GEN_FCN (icode
) (x
, x
, y
);
4559 /* Generate and return an insn body to subtract r1 and c,
4560 storing the result in r0. */
4563 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4565 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (r0
));
4567 if (icode
== CODE_FOR_nothing
4568 || !insn_operand_matches (icode
, 0, r0
)
4569 || !insn_operand_matches (icode
, 1, r1
)
4570 || !insn_operand_matches (icode
, 2, c
))
4573 return GEN_FCN (icode
) (r0
, r1
, c
);
4577 have_sub2_insn (rtx x
, rtx y
)
4579 enum insn_code icode
;
4581 gcc_assert (GET_MODE (x
) != VOIDmode
);
4583 icode
= optab_handler (sub_optab
, GET_MODE (x
));
4585 if (icode
== CODE_FOR_nothing
)
4588 if (!insn_operand_matches (icode
, 0, x
)
4589 || !insn_operand_matches (icode
, 1, x
)
4590 || !insn_operand_matches (icode
, 2, y
))
4596 /* Generate the body of an insn to extend Y (with mode MFROM)
4597 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4600 gen_extend_insn (rtx x
, rtx y
, machine_mode mto
,
4601 machine_mode mfrom
, int unsignedp
)
4603 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4604 return GEN_FCN (icode
) (x
, y
);
4607 /* Generate code to convert FROM to floating point
4608 and store in TO. FROM must be fixed point and not VOIDmode.
4609 UNSIGNEDP nonzero means regard FROM as unsigned.
4610 Normally this is done by correcting the final value
4611 if it is negative. */
4614 expand_float (rtx to
, rtx from
, int unsignedp
)
4616 enum insn_code icode
;
4618 machine_mode fmode
, imode
;
4619 bool can_do_signed
= false;
4621 /* Crash now, because we won't be able to decide which mode to use. */
4622 gcc_assert (GET_MODE (from
) != VOIDmode
);
4624 /* Look for an insn to do the conversion. Do it in the specified
4625 modes if possible; otherwise convert either input, output or both to
4626 wider mode. If the integer mode is wider than the mode of FROM,
4627 we can do the conversion signed even if the input is unsigned. */
4629 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4630 fmode
= GET_MODE_WIDER_MODE (fmode
))
4631 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4632 imode
= GET_MODE_WIDER_MODE (imode
))
4634 int doing_unsigned
= unsignedp
;
4636 if (fmode
!= GET_MODE (to
)
4637 && significand_size (fmode
) < GET_MODE_PRECISION (GET_MODE (from
)))
4640 icode
= can_float_p (fmode
, imode
, unsignedp
);
4641 if (icode
== CODE_FOR_nothing
&& unsignedp
)
4643 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
4644 if (scode
!= CODE_FOR_nothing
)
4645 can_do_signed
= true;
4646 if (imode
!= GET_MODE (from
))
4647 icode
= scode
, doing_unsigned
= 0;
4650 if (icode
!= CODE_FOR_nothing
)
4652 if (imode
!= GET_MODE (from
))
4653 from
= convert_to_mode (imode
, from
, unsignedp
);
4655 if (fmode
!= GET_MODE (to
))
4656 target
= gen_reg_rtx (fmode
);
4658 emit_unop_insn (icode
, target
, from
,
4659 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4662 convert_move (to
, target
, 0);
4667 /* Unsigned integer, and no way to convert directly. Convert as signed,
4668 then unconditionally adjust the result. */
4669 if (unsignedp
&& can_do_signed
)
4671 rtx_code_label
*label
= gen_label_rtx ();
4673 REAL_VALUE_TYPE offset
;
4675 /* Look for a usable floating mode FMODE wider than the source and at
4676 least as wide as the target. Using FMODE will avoid rounding woes
4677 with unsigned values greater than the signed maximum value. */
4679 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4680 fmode
= GET_MODE_WIDER_MODE (fmode
))
4681 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4682 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4685 if (fmode
== VOIDmode
)
4687 /* There is no such mode. Pretend the target is wide enough. */
4688 fmode
= GET_MODE (to
);
4690 /* Avoid double-rounding when TO is narrower than FROM. */
4691 if ((significand_size (fmode
) + 1)
4692 < GET_MODE_PRECISION (GET_MODE (from
)))
4695 rtx_code_label
*neglabel
= gen_label_rtx ();
4697 /* Don't use TARGET if it isn't a register, is a hard register,
4698 or is the wrong mode. */
4700 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4701 || GET_MODE (target
) != fmode
)
4702 target
= gen_reg_rtx (fmode
);
4704 imode
= GET_MODE (from
);
4705 do_pending_stack_adjust ();
4707 /* Test whether the sign bit is set. */
4708 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4711 /* The sign bit is not set. Convert as signed. */
4712 expand_float (target
, from
, 0);
4713 emit_jump_insn (targetm
.gen_jump (label
));
4716 /* The sign bit is set.
4717 Convert to a usable (positive signed) value by shifting right
4718 one bit, while remembering if a nonzero bit was shifted
4719 out; i.e., compute (from & 1) | (from >> 1). */
4721 emit_label (neglabel
);
4722 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4723 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4724 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
4725 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4727 expand_float (target
, temp
, 0);
4729 /* Multiply by 2 to undo the shift above. */
4730 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4731 target
, 0, OPTAB_LIB_WIDEN
);
4733 emit_move_insn (target
, temp
);
4735 do_pending_stack_adjust ();
4741 /* If we are about to do some arithmetic to correct for an
4742 unsigned operand, do it in a pseudo-register. */
4744 if (GET_MODE (to
) != fmode
4745 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4746 target
= gen_reg_rtx (fmode
);
4748 /* Convert as signed integer to floating. */
4749 expand_float (target
, from
, 0);
4751 /* If FROM is negative (and therefore TO is negative),
4752 correct its value by 2**bitwidth. */
4754 do_pending_stack_adjust ();
4755 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4759 real_2expN (&offset
, GET_MODE_PRECISION (GET_MODE (from
)), fmode
);
4760 temp
= expand_binop (fmode
, add_optab
, target
,
4761 const_double_from_real_value (offset
, fmode
),
4762 target
, 0, OPTAB_LIB_WIDEN
);
4764 emit_move_insn (target
, temp
);
4766 do_pending_stack_adjust ();
4771 /* No hardware instruction available; call a library routine. */
4776 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4778 if (GET_MODE_PRECISION (GET_MODE (from
)) < GET_MODE_PRECISION (SImode
))
4779 from
= convert_to_mode (SImode
, from
, unsignedp
);
4781 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
4782 gcc_assert (libfunc
);
4786 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4787 GET_MODE (to
), 1, from
,
4789 insns
= get_insns ();
4792 emit_libcall_block (insns
, target
, value
,
4793 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
4794 GET_MODE (to
), from
));
4799 /* Copy result to requested destination
4800 if we have been computing in a temp location. */
4804 if (GET_MODE (target
) == GET_MODE (to
))
4805 emit_move_insn (to
, target
);
4807 convert_move (to
, target
, 0);
4811 /* Generate code to convert FROM to fixed point and store in TO. FROM
4812 must be floating point. */
4815 expand_fix (rtx to
, rtx from
, int unsignedp
)
4817 enum insn_code icode
;
4819 machine_mode fmode
, imode
;
4820 bool must_trunc
= false;
4822 /* We first try to find a pair of modes, one real and one integer, at
4823 least as wide as FROM and TO, respectively, in which we can open-code
4824 this conversion. If the integer mode is wider than the mode of TO,
4825 we can do the conversion either signed or unsigned. */
4827 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4828 fmode
= GET_MODE_WIDER_MODE (fmode
))
4829 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4830 imode
= GET_MODE_WIDER_MODE (imode
))
4832 int doing_unsigned
= unsignedp
;
4834 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4835 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4836 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4838 if (icode
!= CODE_FOR_nothing
)
4840 rtx_insn
*last
= get_last_insn ();
4841 if (fmode
!= GET_MODE (from
))
4842 from
= convert_to_mode (fmode
, from
, 0);
4846 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4847 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4851 if (imode
!= GET_MODE (to
))
4852 target
= gen_reg_rtx (imode
);
4854 if (maybe_emit_unop_insn (icode
, target
, from
,
4855 doing_unsigned
? UNSIGNED_FIX
: FIX
))
4858 convert_move (to
, target
, unsignedp
);
4861 delete_insns_since (last
);
4865 /* For an unsigned conversion, there is one more way to do it.
4866 If we have a signed conversion, we generate code that compares
4867 the real value to the largest representable positive number. If if
4868 is smaller, the conversion is done normally. Otherwise, subtract
4869 one plus the highest signed number, convert, and add it back.
4871 We only need to check all real modes, since we know we didn't find
4872 anything with a wider integer mode.
4874 This code used to extend FP value into mode wider than the destination.
4875 This is needed for decimal float modes which cannot accurately
4876 represent one plus the highest signed number of the same size, but
4877 not for binary modes. Consider, for instance conversion from SFmode
4880 The hot path through the code is dealing with inputs smaller than 2^63
4881 and doing just the conversion, so there is no bits to lose.
4883 In the other path we know the value is positive in the range 2^63..2^64-1
4884 inclusive. (as for other input overflow happens and result is undefined)
4885 So we know that the most important bit set in mantissa corresponds to
4886 2^63. The subtraction of 2^63 should not generate any rounding as it
4887 simply clears out that bit. The rest is trivial. */
4889 if (unsignedp
&& GET_MODE_PRECISION (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4890 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4891 fmode
= GET_MODE_WIDER_MODE (fmode
))
4892 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0, &must_trunc
)
4893 && (!DECIMAL_FLOAT_MODE_P (fmode
)
4894 || GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (GET_MODE (to
))))
4897 REAL_VALUE_TYPE offset
;
4899 rtx_code_label
*lab1
, *lab2
;
4902 bitsize
= GET_MODE_PRECISION (GET_MODE (to
));
4903 real_2expN (&offset
, bitsize
- 1, fmode
);
4904 limit
= const_double_from_real_value (offset
, fmode
);
4905 lab1
= gen_label_rtx ();
4906 lab2
= gen_label_rtx ();
4908 if (fmode
!= GET_MODE (from
))
4909 from
= convert_to_mode (fmode
, from
, 0);
4911 /* See if we need to do the subtraction. */
4912 do_pending_stack_adjust ();
4913 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4916 /* If not, do the signed "fix" and branch around fixup code. */
4917 expand_fix (to
, from
, 0);
4918 emit_jump_insn (targetm
.gen_jump (lab2
));
4921 /* Otherwise, subtract 2**(N-1), convert to signed number,
4922 then add 2**(N-1). Do the addition using XOR since this
4923 will often generate better code. */
4925 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4926 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4927 expand_fix (to
, target
, 0);
4928 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4930 (HOST_WIDE_INT_1
<< (bitsize
- 1),
4932 to
, 1, OPTAB_LIB_WIDEN
);
4935 emit_move_insn (to
, target
);
4939 if (optab_handler (mov_optab
, GET_MODE (to
)) != CODE_FOR_nothing
)
4941 /* Make a place for a REG_NOTE and add it. */
4942 insn
= emit_move_insn (to
, to
);
4943 set_dst_reg_note (insn
, REG_EQUAL
,
4944 gen_rtx_fmt_e (UNSIGNED_FIX
, GET_MODE (to
),
4952 /* We can't do it with an insn, so use a library call. But first ensure
4953 that the mode of TO is at least as wide as SImode, since those are the
4954 only library calls we know about. */
4956 if (GET_MODE_PRECISION (GET_MODE (to
)) < GET_MODE_PRECISION (SImode
))
4958 target
= gen_reg_rtx (SImode
);
4960 expand_fix (target
, from
, unsignedp
);
4968 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4969 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
4970 gcc_assert (libfunc
);
4974 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4975 GET_MODE (to
), 1, from
,
4977 insns
= get_insns ();
4980 emit_libcall_block (insns
, target
, value
,
4981 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4982 GET_MODE (to
), from
));
4987 if (GET_MODE (to
) == GET_MODE (target
))
4988 emit_move_insn (to
, target
);
4990 convert_move (to
, target
, 0);
4995 /* Promote integer arguments for a libcall if necessary.
4996 emit_library_call_value cannot do the promotion because it does not
4997 know if it should do a signed or unsigned promotion. This is because
4998 there are no tree types defined for libcalls. */
5001 prepare_libcall_arg (rtx arg
, int uintp
)
5003 machine_mode mode
= GET_MODE (arg
);
5004 machine_mode arg_mode
;
5005 if (SCALAR_INT_MODE_P (mode
))
5007 /* If we need to promote the integer function argument we need to do
5008 it here instead of inside emit_library_call_value because in
5009 emit_library_call_value we don't know if we should do a signed or
5010 unsigned promotion. */
5013 arg_mode
= promote_function_mode (NULL_TREE
, mode
,
5014 &unsigned_p
, NULL_TREE
, 0);
5015 if (arg_mode
!= mode
)
5016 return convert_to_mode (arg_mode
, arg
, uintp
);
5021 /* Generate code to convert FROM or TO a fixed-point.
5022 If UINTP is true, either TO or FROM is an unsigned integer.
5023 If SATP is true, we need to saturate the result. */
5026 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5028 machine_mode to_mode
= GET_MODE (to
);
5029 machine_mode from_mode
= GET_MODE (from
);
5031 enum rtx_code this_code
;
5032 enum insn_code code
;
5037 if (to_mode
== from_mode
)
5039 emit_move_insn (to
, from
);
5045 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5046 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5050 tab
= satp
? satfract_optab
: fract_optab
;
5051 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5053 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
5054 if (code
!= CODE_FOR_nothing
)
5056 emit_unop_insn (code
, to
, from
, this_code
);
5060 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5061 gcc_assert (libfunc
);
5063 from
= prepare_libcall_arg (from
, uintp
);
5064 from_mode
= GET_MODE (from
);
5067 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5068 1, from
, from_mode
);
5069 insns
= get_insns ();
5072 emit_libcall_block (insns
, to
, value
,
5073 gen_rtx_fmt_e (optab_to_code (tab
), to_mode
, from
));
5076 /* Generate code to convert FROM to fixed point and store in TO. FROM
5077 must be floating point, TO must be signed. Use the conversion optab
5078 TAB to do the conversion. */
5081 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5083 enum insn_code icode
;
5085 machine_mode fmode
, imode
;
5087 /* We first try to find a pair of modes, one real and one integer, at
5088 least as wide as FROM and TO, respectively, in which we can open-code
5089 this conversion. If the integer mode is wider than the mode of TO,
5090 we can do the conversion either signed or unsigned. */
5092 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
5093 fmode
= GET_MODE_WIDER_MODE (fmode
))
5094 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
5095 imode
= GET_MODE_WIDER_MODE (imode
))
5097 icode
= convert_optab_handler (tab
, imode
, fmode
);
5098 if (icode
!= CODE_FOR_nothing
)
5100 rtx_insn
*last
= get_last_insn ();
5101 if (fmode
!= GET_MODE (from
))
5102 from
= convert_to_mode (fmode
, from
, 0);
5104 if (imode
!= GET_MODE (to
))
5105 target
= gen_reg_rtx (imode
);
5107 if (!maybe_emit_unop_insn (icode
, target
, from
, UNKNOWN
))
5109 delete_insns_since (last
);
5113 convert_move (to
, target
, 0);
5121 /* Report whether we have an instruction to perform the operation
5122 specified by CODE on operands of mode MODE. */
5124 have_insn_for (enum rtx_code code
, machine_mode mode
)
5126 return (code_to_optab (code
)
5127 && (optab_handler (code_to_optab (code
), mode
)
5128 != CODE_FOR_nothing
));
5131 /* Print information about the current contents of the optabs on
5135 debug_optab_libfuncs (void)
5139 /* Dump the arithmetic optabs. */
5140 for (i
= FIRST_NORM_OPTAB
; i
<= LAST_NORMLIB_OPTAB
; ++i
)
5141 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5143 rtx l
= optab_libfunc ((optab
) i
, (machine_mode
) j
);
5146 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5147 fprintf (stderr
, "%s\t%s:\t%s\n",
5148 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5154 /* Dump the conversion optabs. */
5155 for (i
= FIRST_CONV_OPTAB
; i
<= LAST_CONVLIB_OPTAB
; ++i
)
5156 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5157 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5159 rtx l
= convert_optab_libfunc ((optab
) i
, (machine_mode
) j
,
5163 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5164 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5165 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5173 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5174 CODE. Return 0 on failure. */
5177 gen_cond_trap (enum rtx_code code
, rtx op1
, rtx op2
, rtx tcode
)
5179 machine_mode mode
= GET_MODE (op1
);
5180 enum insn_code icode
;
5184 if (mode
== VOIDmode
)
5187 icode
= optab_handler (ctrap_optab
, mode
);
5188 if (icode
== CODE_FOR_nothing
)
5191 /* Some targets only accept a zero trap code. */
5192 if (!insn_operand_matches (icode
, 3, tcode
))
5195 do_pending_stack_adjust ();
5197 prepare_cmp_insn (op1
, op2
, code
, NULL_RTX
, false, OPTAB_DIRECT
,
5202 insn
= GEN_FCN (icode
) (trap_rtx
, XEXP (trap_rtx
, 0), XEXP (trap_rtx
, 1),
5205 /* If that failed, then give up. */
5213 insn
= get_insns ();
5218 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5219 or unsigned operation code. */
5222 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5234 code
= unsignedp
? LTU
: LT
;
5237 code
= unsignedp
? LEU
: LE
;
5240 code
= unsignedp
? GTU
: GT
;
5243 code
= unsignedp
? GEU
: GE
;
5246 case UNORDERED_EXPR
:
5285 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5286 select signed or unsigned operators. OPNO holds the index of the
5287 first comparison operand for insn ICODE. Do not generate the
5288 compare instruction itself. */
5291 vector_compare_rtx (machine_mode cmp_mode
, enum tree_code tcode
,
5292 tree t_op0
, tree t_op1
, bool unsignedp
,
5293 enum insn_code icode
, unsigned int opno
)
5295 struct expand_operand ops
[2];
5296 rtx rtx_op0
, rtx_op1
;
5297 machine_mode m0
, m1
;
5298 enum rtx_code rcode
= get_rtx_code (tcode
, unsignedp
);
5300 gcc_assert (TREE_CODE_CLASS (tcode
) == tcc_comparison
);
5302 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5303 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5304 cases, use the original mode. */
5305 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)),
5307 m0
= GET_MODE (rtx_op0
);
5309 m0
= TYPE_MODE (TREE_TYPE (t_op0
));
5311 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)),
5313 m1
= GET_MODE (rtx_op1
);
5315 m1
= TYPE_MODE (TREE_TYPE (t_op1
));
5317 create_input_operand (&ops
[0], rtx_op0
, m0
);
5318 create_input_operand (&ops
[1], rtx_op1
, m1
);
5319 if (!maybe_legitimize_operands (icode
, opno
, 2, ops
))
5321 return gen_rtx_fmt_ee (rcode
, cmp_mode
, ops
[0].value
, ops
[1].value
);
5324 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5325 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5326 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5329 shift_amt_for_vec_perm_mask (rtx sel
)
5331 unsigned int i
, first
, nelt
= GET_MODE_NUNITS (GET_MODE (sel
));
5332 unsigned int bitsize
= GET_MODE_UNIT_BITSIZE (GET_MODE (sel
));
5334 if (GET_CODE (sel
) != CONST_VECTOR
)
5337 first
= INTVAL (CONST_VECTOR_ELT (sel
, 0));
5340 for (i
= 1; i
< nelt
; i
++)
5342 int idx
= INTVAL (CONST_VECTOR_ELT (sel
, i
));
5343 unsigned int expected
= i
+ first
;
5344 /* Indices into the second vector are all equivalent. */
5345 if (idx
< 0 || (MIN (nelt
, (unsigned) idx
) != MIN (nelt
, expected
)))
5349 return GEN_INT (first
* bitsize
);
5352 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5355 expand_vec_perm_1 (enum insn_code icode
, rtx target
,
5356 rtx v0
, rtx v1
, rtx sel
)
5358 machine_mode tmode
= GET_MODE (target
);
5359 machine_mode smode
= GET_MODE (sel
);
5360 struct expand_operand ops
[4];
5362 create_output_operand (&ops
[0], target
, tmode
);
5363 create_input_operand (&ops
[3], sel
, smode
);
5365 /* Make an effort to preserve v0 == v1. The target expander is able to
5366 rely on this to determine if we're permuting a single input operand. */
5367 if (rtx_equal_p (v0
, v1
))
5369 if (!insn_operand_matches (icode
, 1, v0
))
5370 v0
= force_reg (tmode
, v0
);
5371 gcc_checking_assert (insn_operand_matches (icode
, 1, v0
));
5372 gcc_checking_assert (insn_operand_matches (icode
, 2, v0
));
5374 create_fixed_operand (&ops
[1], v0
);
5375 create_fixed_operand (&ops
[2], v0
);
5379 create_input_operand (&ops
[1], v0
, tmode
);
5380 create_input_operand (&ops
[2], v1
, tmode
);
5383 if (maybe_expand_insn (icode
, 4, ops
))
5384 return ops
[0].value
;
5388 /* Generate instructions for vec_perm optab given its mode
5389 and three operands. */
5392 expand_vec_perm (machine_mode mode
, rtx v0
, rtx v1
, rtx sel
, rtx target
)
5394 enum insn_code icode
;
5395 machine_mode qimode
;
5396 unsigned int i
, w
, e
, u
;
5397 rtx tmp
, sel_qi
= NULL
;
5400 if (!target
|| GET_MODE (target
) != mode
)
5401 target
= gen_reg_rtx (mode
);
5403 w
= GET_MODE_SIZE (mode
);
5404 e
= GET_MODE_NUNITS (mode
);
5405 u
= GET_MODE_UNIT_SIZE (mode
);
5407 /* Set QIMODE to a different vector mode with byte elements.
5408 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5410 if (GET_MODE_INNER (mode
) != QImode
)
5412 qimode
= mode_for_vector (QImode
, w
);
5413 if (!VECTOR_MODE_P (qimode
))
5417 /* If the input is a constant, expand it specially. */
5418 gcc_assert (GET_MODE_CLASS (GET_MODE (sel
)) == MODE_VECTOR_INT
);
5419 if (GET_CODE (sel
) == CONST_VECTOR
)
5421 /* See if this can be handled with a vec_shr. We only do this if the
5422 second vector is all zeroes. */
5423 enum insn_code shift_code
= optab_handler (vec_shr_optab
, mode
);
5424 enum insn_code shift_code_qi
= ((qimode
!= VOIDmode
&& qimode
!= mode
)
5425 ? optab_handler (vec_shr_optab
, qimode
)
5426 : CODE_FOR_nothing
);
5427 rtx shift_amt
= NULL_RTX
;
5428 if (v1
== CONST0_RTX (GET_MODE (v1
))
5429 && (shift_code
!= CODE_FOR_nothing
5430 || shift_code_qi
!= CODE_FOR_nothing
))
5432 shift_amt
= shift_amt_for_vec_perm_mask (sel
);
5435 struct expand_operand ops
[3];
5436 if (shift_code
!= CODE_FOR_nothing
)
5438 create_output_operand (&ops
[0], target
, mode
);
5439 create_input_operand (&ops
[1], v0
, mode
);
5440 create_convert_operand_from_type (&ops
[2], shift_amt
,
5442 if (maybe_expand_insn (shift_code
, 3, ops
))
5443 return ops
[0].value
;
5445 if (shift_code_qi
!= CODE_FOR_nothing
)
5447 tmp
= gen_reg_rtx (qimode
);
5448 create_output_operand (&ops
[0], tmp
, qimode
);
5449 create_input_operand (&ops
[1], gen_lowpart (qimode
, v0
),
5451 create_convert_operand_from_type (&ops
[2], shift_amt
,
5453 if (maybe_expand_insn (shift_code_qi
, 3, ops
))
5454 return gen_lowpart (mode
, ops
[0].value
);
5459 icode
= direct_optab_handler (vec_perm_const_optab
, mode
);
5460 if (icode
!= CODE_FOR_nothing
)
5462 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
5467 /* Fall back to a constant byte-based permutation. */
5468 if (qimode
!= VOIDmode
)
5470 vec
= rtvec_alloc (w
);
5471 for (i
= 0; i
< e
; ++i
)
5473 unsigned int j
, this_e
;
5475 this_e
= INTVAL (CONST_VECTOR_ELT (sel
, i
));
5476 this_e
&= 2 * e
- 1;
5479 for (j
= 0; j
< u
; ++j
)
5480 RTVEC_ELT (vec
, i
* u
+ j
) = GEN_INT (this_e
+ j
);
5482 sel_qi
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5484 icode
= direct_optab_handler (vec_perm_const_optab
, qimode
);
5485 if (icode
!= CODE_FOR_nothing
)
5487 tmp
= mode
!= qimode
? gen_reg_rtx (qimode
) : target
;
5488 tmp
= expand_vec_perm_1 (icode
, tmp
, gen_lowpart (qimode
, v0
),
5489 gen_lowpart (qimode
, v1
), sel_qi
);
5491 return gen_lowpart (mode
, tmp
);
5496 /* Otherwise expand as a fully variable permuation. */
5497 icode
= direct_optab_handler (vec_perm_optab
, mode
);
5498 if (icode
!= CODE_FOR_nothing
)
5500 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
5505 /* As a special case to aid several targets, lower the element-based
5506 permutation to a byte-based permutation and try again. */
5507 if (qimode
== VOIDmode
)
5509 icode
= direct_optab_handler (vec_perm_optab
, qimode
);
5510 if (icode
== CODE_FOR_nothing
)
5515 /* Multiply each element by its byte size. */
5516 machine_mode selmode
= GET_MODE (sel
);
5518 sel
= expand_simple_binop (selmode
, PLUS
, sel
, sel
,
5519 NULL
, 0, OPTAB_DIRECT
);
5521 sel
= expand_simple_binop (selmode
, ASHIFT
, sel
,
5522 GEN_INT (exact_log2 (u
)),
5523 NULL
, 0, OPTAB_DIRECT
);
5524 gcc_assert (sel
!= NULL
);
5526 /* Broadcast the low byte each element into each of its bytes. */
5527 vec
= rtvec_alloc (w
);
5528 for (i
= 0; i
< w
; ++i
)
5530 int this_e
= i
/ u
* u
;
5531 if (BYTES_BIG_ENDIAN
)
5533 RTVEC_ELT (vec
, i
) = GEN_INT (this_e
);
5535 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5536 sel
= gen_lowpart (qimode
, sel
);
5537 sel
= expand_vec_perm (qimode
, sel
, sel
, tmp
, NULL
);
5538 gcc_assert (sel
!= NULL
);
5540 /* Add the byte offset to each byte element. */
5541 /* Note that the definition of the indicies here is memory ordering,
5542 so there should be no difference between big and little endian. */
5543 vec
= rtvec_alloc (w
);
5544 for (i
= 0; i
< w
; ++i
)
5545 RTVEC_ELT (vec
, i
) = GEN_INT (i
% u
);
5546 tmp
= gen_rtx_CONST_VECTOR (qimode
, vec
);
5547 sel_qi
= expand_simple_binop (qimode
, PLUS
, sel
, tmp
,
5548 sel
, 0, OPTAB_DIRECT
);
5549 gcc_assert (sel_qi
!= NULL
);
5552 tmp
= mode
!= qimode
? gen_reg_rtx (qimode
) : target
;
5553 tmp
= expand_vec_perm_1 (icode
, tmp
, gen_lowpart (qimode
, v0
),
5554 gen_lowpart (qimode
, v1
), sel_qi
);
5556 tmp
= gen_lowpart (mode
, tmp
);
5560 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5564 expand_vec_cond_mask_expr (tree vec_cond_type
, tree op0
, tree op1
, tree op2
,
5567 struct expand_operand ops
[4];
5568 machine_mode mode
= TYPE_MODE (vec_cond_type
);
5569 machine_mode mask_mode
= TYPE_MODE (TREE_TYPE (op0
));
5570 enum insn_code icode
= get_vcond_mask_icode (mode
, mask_mode
);
5571 rtx mask
, rtx_op1
, rtx_op2
;
5573 if (icode
== CODE_FOR_nothing
)
5576 mask
= expand_normal (op0
);
5577 rtx_op1
= expand_normal (op1
);
5578 rtx_op2
= expand_normal (op2
);
5580 mask
= force_reg (mask_mode
, mask
);
5581 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5583 create_output_operand (&ops
[0], target
, mode
);
5584 create_input_operand (&ops
[1], rtx_op1
, mode
);
5585 create_input_operand (&ops
[2], rtx_op2
, mode
);
5586 create_input_operand (&ops
[3], mask
, mask_mode
);
5587 expand_insn (icode
, 4, ops
);
5589 return ops
[0].value
;
5592 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5596 expand_vec_cond_expr (tree vec_cond_type
, tree op0
, tree op1
, tree op2
,
5599 struct expand_operand ops
[6];
5600 enum insn_code icode
;
5601 rtx comparison
, rtx_op1
, rtx_op2
;
5602 machine_mode mode
= TYPE_MODE (vec_cond_type
);
5603 machine_mode cmp_op_mode
;
5606 enum tree_code tcode
;
5608 if (COMPARISON_CLASS_P (op0
))
5610 op0a
= TREE_OPERAND (op0
, 0);
5611 op0b
= TREE_OPERAND (op0
, 1);
5612 tcode
= TREE_CODE (op0
);
5616 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)));
5617 if (get_vcond_mask_icode (mode
, TYPE_MODE (TREE_TYPE (op0
)))
5618 != CODE_FOR_nothing
)
5619 return expand_vec_cond_mask_expr (vec_cond_type
, op0
, op1
,
5624 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0
)))
5625 == MODE_VECTOR_INT
);
5627 op0b
= build_zero_cst (TREE_TYPE (op0
));
5631 cmp_op_mode
= TYPE_MODE (TREE_TYPE (op0a
));
5632 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (op0a
));
5635 gcc_assert (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (cmp_op_mode
)
5636 && GET_MODE_NUNITS (mode
) == GET_MODE_NUNITS (cmp_op_mode
));
5638 icode
= get_vcond_icode (mode
, cmp_op_mode
, unsignedp
);
5639 if (icode
== CODE_FOR_nothing
)
5641 if (tcode
== EQ_EXPR
|| tcode
== NE_EXPR
)
5642 icode
= get_vcond_eq_icode (mode
, cmp_op_mode
);
5643 if (icode
== CODE_FOR_nothing
)
5647 comparison
= vector_compare_rtx (VOIDmode
, tcode
, op0a
, op0b
, unsignedp
,
5649 rtx_op1
= expand_normal (op1
);
5650 rtx_op2
= expand_normal (op2
);
5652 create_output_operand (&ops
[0], target
, mode
);
5653 create_input_operand (&ops
[1], rtx_op1
, mode
);
5654 create_input_operand (&ops
[2], rtx_op2
, mode
);
5655 create_fixed_operand (&ops
[3], comparison
);
5656 create_fixed_operand (&ops
[4], XEXP (comparison
, 0));
5657 create_fixed_operand (&ops
[5], XEXP (comparison
, 1));
5658 expand_insn (icode
, 6, ops
);
5659 return ops
[0].value
;
5662 /* Generate insns for a vector comparison into a mask. */
5665 expand_vec_cmp_expr (tree type
, tree exp
, rtx target
)
5667 struct expand_operand ops
[4];
5668 enum insn_code icode
;
5670 machine_mode mask_mode
= TYPE_MODE (type
);
5674 enum tree_code tcode
;
5676 op0a
= TREE_OPERAND (exp
, 0);
5677 op0b
= TREE_OPERAND (exp
, 1);
5678 tcode
= TREE_CODE (exp
);
5680 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (op0a
));
5681 vmode
= TYPE_MODE (TREE_TYPE (op0a
));
5683 icode
= get_vec_cmp_icode (vmode
, mask_mode
, unsignedp
);
5684 if (icode
== CODE_FOR_nothing
)
5686 if (tcode
== EQ_EXPR
|| tcode
== NE_EXPR
)
5687 icode
= get_vec_cmp_eq_icode (vmode
, mask_mode
);
5688 if (icode
== CODE_FOR_nothing
)
5692 comparison
= vector_compare_rtx (mask_mode
, tcode
, op0a
, op0b
,
5693 unsignedp
, icode
, 2);
5694 create_output_operand (&ops
[0], target
, mask_mode
);
5695 create_fixed_operand (&ops
[1], comparison
);
5696 create_fixed_operand (&ops
[2], XEXP (comparison
, 0));
5697 create_fixed_operand (&ops
[3], XEXP (comparison
, 1));
5698 expand_insn (icode
, 4, ops
);
5699 return ops
[0].value
;
5702 /* Expand a highpart multiply. */
5705 expand_mult_highpart (machine_mode mode
, rtx op0
, rtx op1
,
5706 rtx target
, bool uns_p
)
5708 struct expand_operand eops
[3];
5709 enum insn_code icode
;
5710 int method
, i
, nunits
;
5716 method
= can_mult_highpart_p (mode
, uns_p
);
5722 tab1
= uns_p
? umul_highpart_optab
: smul_highpart_optab
;
5723 return expand_binop (mode
, tab1
, op0
, op1
, target
, uns_p
,
5726 tab1
= uns_p
? vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
5727 tab2
= uns_p
? vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
5730 tab1
= uns_p
? vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
5731 tab2
= uns_p
? vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
5732 if (BYTES_BIG_ENDIAN
)
5733 std::swap (tab1
, tab2
);
5739 icode
= optab_handler (tab1
, mode
);
5740 nunits
= GET_MODE_NUNITS (mode
);
5741 wmode
= insn_data
[icode
].operand
[0].mode
;
5742 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode
) == nunits
);
5743 gcc_checking_assert (GET_MODE_SIZE (wmode
) == GET_MODE_SIZE (mode
));
5745 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
5746 create_input_operand (&eops
[1], op0
, mode
);
5747 create_input_operand (&eops
[2], op1
, mode
);
5748 expand_insn (icode
, 3, eops
);
5749 m1
= gen_lowpart (mode
, eops
[0].value
);
5751 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
5752 create_input_operand (&eops
[1], op0
, mode
);
5753 create_input_operand (&eops
[2], op1
, mode
);
5754 expand_insn (optab_handler (tab2
, mode
), 3, eops
);
5755 m2
= gen_lowpart (mode
, eops
[0].value
);
5757 v
= rtvec_alloc (nunits
);
5760 for (i
= 0; i
< nunits
; ++i
)
5761 RTVEC_ELT (v
, i
) = GEN_INT (!BYTES_BIG_ENDIAN
+ (i
& ~1)
5762 + ((i
& 1) ? nunits
: 0));
5766 for (i
= 0; i
< nunits
; ++i
)
5767 RTVEC_ELT (v
, i
) = GEN_INT (2 * i
+ (BYTES_BIG_ENDIAN
? 0 : 1));
5769 perm
= gen_rtx_CONST_VECTOR (mode
, v
);
5771 return expand_vec_perm (mode
, m1
, m2
, perm
, target
);
5774 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5778 find_cc_set (rtx x
, const_rtx pat
, void *data
)
5780 if (REG_P (x
) && GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
5781 && GET_CODE (pat
) == SET
)
5783 rtx
*p_cc_reg
= (rtx
*) data
;
5784 gcc_assert (!*p_cc_reg
);
5789 /* This is a helper function for the other atomic operations. This function
5790 emits a loop that contains SEQ that iterates until a compare-and-swap
5791 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5792 a set of instructions that takes a value from OLD_REG as an input and
5793 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5794 set to the current contents of MEM. After SEQ, a compare-and-swap will
5795 attempt to update MEM with NEW_REG. The function returns true when the
5796 loop was generated successfully. */
5799 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
5801 machine_mode mode
= GET_MODE (mem
);
5802 rtx_code_label
*label
;
5803 rtx cmp_reg
, success
, oldval
;
5805 /* The loop we want to generate looks like
5811 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5815 Note that we only do the plain load from memory once. Subsequent
5816 iterations use the value loaded by the compare-and-swap pattern. */
5818 label
= gen_label_rtx ();
5819 cmp_reg
= gen_reg_rtx (mode
);
5821 emit_move_insn (cmp_reg
, mem
);
5823 emit_move_insn (old_reg
, cmp_reg
);
5829 if (!expand_atomic_compare_and_swap (&success
, &oldval
, mem
, old_reg
,
5830 new_reg
, false, MEMMODEL_SYNC_SEQ_CST
,
5834 if (oldval
!= cmp_reg
)
5835 emit_move_insn (cmp_reg
, oldval
);
5837 /* Mark this jump predicted not taken. */
5838 emit_cmp_and_jump_insns (success
, const0_rtx
, EQ
, const0_rtx
,
5839 GET_MODE (success
), 1, label
, 0);
5844 /* This function tries to emit an atomic_exchange intruction. VAL is written
5845 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5846 using TARGET if possible. */
5849 maybe_emit_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
5851 machine_mode mode
= GET_MODE (mem
);
5852 enum insn_code icode
;
5854 /* If the target supports the exchange directly, great. */
5855 icode
= direct_optab_handler (atomic_exchange_optab
, mode
);
5856 if (icode
!= CODE_FOR_nothing
)
5858 struct expand_operand ops
[4];
5860 create_output_operand (&ops
[0], target
, mode
);
5861 create_fixed_operand (&ops
[1], mem
);
5862 create_input_operand (&ops
[2], val
, mode
);
5863 create_integer_operand (&ops
[3], model
);
5864 if (maybe_expand_insn (icode
, 4, ops
))
5865 return ops
[0].value
;
5871 /* This function tries to implement an atomic exchange operation using
5872 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5873 The previous contents of *MEM are returned, using TARGET if possible.
5874 Since this instructionn is an acquire barrier only, stronger memory
5875 models may require additional barriers to be emitted. */
5878 maybe_emit_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
,
5879 enum memmodel model
)
5881 machine_mode mode
= GET_MODE (mem
);
5882 enum insn_code icode
;
5883 rtx_insn
*last_insn
= get_last_insn ();
5885 icode
= optab_handler (sync_lock_test_and_set_optab
, mode
);
5887 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5888 exists, and the memory model is stronger than acquire, add a release
5889 barrier before the instruction. */
5891 if (is_mm_seq_cst (model
) || is_mm_release (model
) || is_mm_acq_rel (model
))
5892 expand_mem_thread_fence (model
);
5894 if (icode
!= CODE_FOR_nothing
)
5896 struct expand_operand ops
[3];
5897 create_output_operand (&ops
[0], target
, mode
);
5898 create_fixed_operand (&ops
[1], mem
);
5899 create_input_operand (&ops
[2], val
, mode
);
5900 if (maybe_expand_insn (icode
, 3, ops
))
5901 return ops
[0].value
;
5904 /* If an external test-and-set libcall is provided, use that instead of
5905 any external compare-and-swap that we might get from the compare-and-
5906 swap-loop expansion later. */
5907 if (!can_compare_and_swap_p (mode
, false))
5909 rtx libfunc
= optab_libfunc (sync_lock_test_and_set_optab
, mode
);
5910 if (libfunc
!= NULL
)
5914 addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
5915 return emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
5916 mode
, 2, addr
, ptr_mode
,
5921 /* If the test_and_set can't be emitted, eliminate any barrier that might
5922 have been emitted. */
5923 delete_insns_since (last_insn
);
5927 /* This function tries to implement an atomic exchange operation using a
5928 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5929 *MEM are returned, using TARGET if possible. No memory model is required
5930 since a compare_and_swap loop is seq-cst. */
5933 maybe_emit_compare_and_swap_exchange_loop (rtx target
, rtx mem
, rtx val
)
5935 machine_mode mode
= GET_MODE (mem
);
5937 if (can_compare_and_swap_p (mode
, true))
5939 if (!target
|| !register_operand (target
, mode
))
5940 target
= gen_reg_rtx (mode
);
5941 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
5948 /* This function tries to implement an atomic test-and-set operation
5949 using the atomic_test_and_set instruction pattern. A boolean value
5950 is returned from the operation, using TARGET if possible. */
5953 maybe_emit_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
5955 machine_mode pat_bool_mode
;
5956 struct expand_operand ops
[3];
5958 if (!targetm
.have_atomic_test_and_set ())
5961 /* While we always get QImode from __atomic_test_and_set, we get
5962 other memory modes from __sync_lock_test_and_set. Note that we
5963 use no endian adjustment here. This matches the 4.6 behavior
5964 in the Sparc backend. */
5965 enum insn_code icode
= targetm
.code_for_atomic_test_and_set
;
5966 gcc_checking_assert (insn_data
[icode
].operand
[1].mode
== QImode
);
5967 if (GET_MODE (mem
) != QImode
)
5968 mem
= adjust_address_nv (mem
, QImode
, 0);
5970 pat_bool_mode
= insn_data
[icode
].operand
[0].mode
;
5971 create_output_operand (&ops
[0], target
, pat_bool_mode
);
5972 create_fixed_operand (&ops
[1], mem
);
5973 create_integer_operand (&ops
[2], model
);
5975 if (maybe_expand_insn (icode
, 3, ops
))
5976 return ops
[0].value
;
5980 /* This function expands the legacy _sync_lock test_and_set operation which is
5981 generally an atomic exchange. Some limited targets only allow the
5982 constant 1 to be stored. This is an ACQUIRE operation.
5984 TARGET is an optional place to stick the return value.
5985 MEM is where VAL is stored. */
5988 expand_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
)
5992 /* Try an atomic_exchange first. */
5993 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, MEMMODEL_SYNC_ACQUIRE
);
5997 ret
= maybe_emit_sync_lock_test_and_set (target
, mem
, val
,
5998 MEMMODEL_SYNC_ACQUIRE
);
6002 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
6006 /* If there are no other options, try atomic_test_and_set if the value
6007 being stored is 1. */
6008 if (val
== const1_rtx
)
6009 ret
= maybe_emit_atomic_test_and_set (target
, mem
, MEMMODEL_SYNC_ACQUIRE
);
6014 /* This function expands the atomic test_and_set operation:
6015 atomically store a boolean TRUE into MEM and return the previous value.
6017 MEMMODEL is the memory model variant to use.
6018 TARGET is an optional place to stick the return value. */
6021 expand_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
6023 machine_mode mode
= GET_MODE (mem
);
6024 rtx ret
, trueval
, subtarget
;
6026 ret
= maybe_emit_atomic_test_and_set (target
, mem
, model
);
6030 /* Be binary compatible with non-default settings of trueval, and different
6031 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6032 another only has atomic-exchange. */
6033 if (targetm
.atomic_test_and_set_trueval
== 1)
6035 trueval
= const1_rtx
;
6036 subtarget
= target
? target
: gen_reg_rtx (mode
);
6040 trueval
= gen_int_mode (targetm
.atomic_test_and_set_trueval
, mode
);
6041 subtarget
= gen_reg_rtx (mode
);
6044 /* Try the atomic-exchange optab... */
6045 ret
= maybe_emit_atomic_exchange (subtarget
, mem
, trueval
, model
);
6047 /* ... then an atomic-compare-and-swap loop ... */
6049 ret
= maybe_emit_compare_and_swap_exchange_loop (subtarget
, mem
, trueval
);
6051 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6053 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, trueval
, model
);
6055 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6056 things with the value 1. Thus we try again without trueval. */
6057 if (!ret
&& targetm
.atomic_test_and_set_trueval
!= 1)
6058 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, const1_rtx
, model
);
6060 /* Failing all else, assume a single threaded environment and simply
6061 perform the operation. */
6064 /* If the result is ignored skip the move to target. */
6065 if (subtarget
!= const0_rtx
)
6066 emit_move_insn (subtarget
, mem
);
6068 emit_move_insn (mem
, trueval
);
6072 /* Recall that have to return a boolean value; rectify if trueval
6073 is not exactly one. */
6074 if (targetm
.atomic_test_and_set_trueval
!= 1)
6075 ret
= emit_store_flag_force (target
, NE
, ret
, const0_rtx
, mode
, 0, 1);
6080 /* This function expands the atomic exchange operation:
6081 atomically store VAL in MEM and return the previous value in MEM.
6083 MEMMODEL is the memory model variant to use.
6084 TARGET is an optional place to stick the return value. */
6087 expand_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
6089 machine_mode mode
= GET_MODE (mem
);
6092 /* If loads are not atomic for the required size and we are not called to
6093 provide a __sync builtin, do not do anything so that we stay consistent
6094 with atomic loads of the same size. */
6095 if (!can_atomic_load_p (mode
) && !is_mm_sync (model
))
6098 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, model
);
6100 /* Next try a compare-and-swap loop for the exchange. */
6102 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
                                rtx mem, rtx expected, rtx desired,
                                bool is_weak, enum memmodel succ_model,
                                enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
        ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
          || (target_bool = *ptarget_bool) == NULL
          || GET_MODE (target_bool) != bool_mode)
        target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
        {
          /* Return success/failure.  */
          target_bool = ops[0].value;
          target_oval = ops[1].value;
          goto success;
        }
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
        return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
         skip the computation of it.  */
      if (ptarget_bool == NULL)
        goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
        note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
        {
          target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
                                               const0_rtx, VOIDmode, 0, 1);
          goto success;
        }
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                            mode, 3, addr, ptr_mode,
                                            expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
        goto success_bool_from_val;
      else
        goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
                                       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;

  return true;
}
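/* A sketch of typical use (the local names here are hypothetical): the
   caller passes the addresses of its result slots and re-reads them
   afterwards, since this function may substitute fresh pseudos:

     rtx target_bool = NULL_RTX, target_oval = NULL_RTX;
     if (expand_atomic_compare_and_swap (&target_bool, &target_oval,
                                         mem, expected, desired,
                                         false, MEMMODEL_SEQ_CST,
                                         MEMMODEL_SEQ_CST))
       {
         ...target_bool holds the success flag, target_oval the value
            that was in *mem before the operation...
       }

   Passing const0_rtx in either slot suppresses that result.  */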
/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
                                 rtvec_alloc (0), rtvec_alloc (0),
                                 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
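/* Schematically, the insn emitted above is

     (parallel [(asm_operands ("") ("") 0 [] [] [] <location>)
                (clobber (mem:BLK (scratch)))])

   i.e. a volatile asm with a wildcard memory clobber: it emits no
   machine code, but memory references cannot be moved across it.  */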
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (targetm.have_mem_thread_fence ())
    emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      if (targetm.have_memory_barrier ())
        emit_insn (targetm.gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
        emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
        expand_asm_memory_barrier ();
    }
}
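/* For example (a hedged illustration, not a new entry point): a source
   level __atomic_thread_fence (__ATOMIC_ACQUIRE) reaches this routine
   with MODEL == MEMMODEL_ACQUIRE.  On a target with no fence pattern,
   no barrier pattern and no synchronize libfunc, it degrades to the
   compiler-only barrier emitted by expand_asm_memory_barrier.  */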
/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  if (targetm.have_mem_signal_fence ())
    emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
         handler running on the same thread.  Thus this really becomes a
         compiler barrier, in that stores must not be sunk past
         (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     doing this could result in would be incorrect if this is a volatile
     atomic load or targeting read-only-mapped memory.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
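/* Under the fallback path above, a seq-cst load of a word-sized object
   therefore expands to, schematically,

     fence (seq_cst)
     target = mem
     fence (seq_cst)

   while weaker models emit only the trailing fence (itself a no-op for
   relaxed).  */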
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
        return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          create_fixed_operand (&ops[0], mem);
          create_input_operand (&ops[1], const0_rtx, mode);
          if (maybe_expand_insn (icode, 2, ops))
            {
              /* lock_release is only a release barrier.  */
              if (is_mm_seq_cst (model))
                expand_mem_thread_fence (model);
              return const0_rtx;
            }
        }
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
         we can try an atomic_exchange and throw away the result.  Otherwise,
         don't do anything so that we do not create an inconsistency between
         loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
        {
          rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
          if (!target)
            target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
                                                                val);
          if (target)
            return const0_rtx;
        }
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
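/* The fallback path mirrors expand_atomic_load: the plain store is
   bracketed as, schematically,

     fence (model)
     mem = val
     fence (seq_cst only)

   so a release store gets only the leading fence, and a seq-cst store
   gets both.  */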
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
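/* A minimal usage sketch (locals hypothetical): callers fill a stack
   structure and then probe whichever optab fields apply, as
   maybe_emit_op does below:

     struct atomic_op_functions optab;
     get_atomic_op_for_code (&optab, PLUS);
     ...optab.mem_fetch_before is now atomic_fetch_add_optab and
        optab.reverse_code is MINUS...

   reverse_code records how to recompute the value before the operation
   from the value after it; UNKNOWN means no such inverse exists for
   that CODE.  */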
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
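/* The two rewrites above rely on x & 0 == 0 and x | -1 == -1 for any x,
   so the stored value is independent of the old contents:

     __atomic_fetch_and (&x, 0, m)   ->   exchange (&x, 0, m)
     __atomic_fetch_or (&x, -1, m)   ->   exchange (&x, -1, m)

   The *_OP_fetch forms qualify only when their result is unused, since
   an exchange returns the previous value, not the new one.  */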
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
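/* As an example of the compensation step: if add_fetch was requested but
   only atomic_fetch_add exists, the expansion is roughly

     tmp = atomic_fetch_add (mem, val)     ...value before...
     result = tmp + val                    ...value after...

   For NAND (CODE == NOT) the after-value is ~(tmp & val), which is why
   that case needs the AND followed by NOT above rather than a single
   binop.  */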
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx indicates
   the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          rtx_insn *insns = get_insns ();
          end_sequence ();
          emit_insn (insns);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            2, addr, ptr_mode, val, mode);

          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
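/* The PLUS/MINUS fallback above exploits x - v == x + (-v): a target
   that only provides atomic_fetch_add can still expand, say,
   __atomic_fetch_sub (&x, 5, m) as atomic_fetch_add (&x, -5, m).  The
   negation is built in a scratch sequence so that it can be discarded
   cheaply if the reversed operation fails to expand as well.  */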
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;
      rtx_insn *last;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          machine_mode mode;

          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  unsigned int i;
  rtx_insn *last;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
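/* The usual idiom built on these routines, used throughout this file
   (the optab and locals below are placeholders, not a fixed recipe):

     struct expand_operand ops[2];
     enum insn_code icode = optab_handler (neg_optab, mode);
     if (icode != CODE_FOR_nothing)
       {
         create_output_operand (&ops[0], target, mode);
         create_input_operand (&ops[1], op0, mode);
         if (maybe_expand_insn (icode, 2, ops))
           return ops[0].value;
       }

   On failure no insns are left behind, so the caller is free to try a
   different expansion strategy.  */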
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}

/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))