/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "tree-hasher.h"
#include "stor-layout.h"
#include "insn-codes.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
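
/* Illustrative sketch (not part of the compiler): why NO_EXTEND is safe for
   logical operations but not for right shifts.  If only the low byte of the
   result is used, garbage in the high bits of the widened inputs cannot leak
   into the low byte of an AND/OR/XOR/add, but a right shift pulls high bits
   downward.  The names and word widths below are assumptions chosen for the
   example.  */

static inline unsigned char
example_no_extend_ok (unsigned int a32, unsigned int b32)
{
  /* High bits of a32/b32 may be garbage; the low byte of the AND is
     still the AND of the low bytes.  */
  return (unsigned char) (a32 & b32);
}

static inline unsigned char
example_no_extend_bad (unsigned int a32)
{
  /* Here garbage high bits of a32 are shifted down into the low byte,
     so a real zero- or sign-extension would have been required.  */
  return (unsigned char) (a32 >> 4);
}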
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (wide_op);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
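
/* Illustrative sketch (not part of the compiler): the superword case for a
   64-bit logical right shift done with 32-bit words, when the count N is
   known to be in [32, 63].  The "into" half receives OUTOF >> (N - 32)
   (SUPERWORD_OP1 == N - BITS_PER_WORD) and the "outof" half is zero-filled;
   an arithmetic shift would sign-fill it instead.  Types and names are
   assumptions chosen for the example.  */

static inline unsigned long long
example_superword_lshr (unsigned int outof, unsigned int into, unsigned int n)
{
  (void) into;			/* The old low word is shifted out entirely.  */
  unsigned int new_into = outof >> (n - 32);
  unsigned int new_outof = 0;	/* Zero fill; ashr would copy the sign bit.  */
  return ((unsigned long long) new_outof << 32) | new_into;
}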
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
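
/* Illustrative sketch (not part of the compiler): a 64-bit logical right
   shift done with 32-bit words when the count N is known to be in [1, 31].
   The low ("into") word combines its own shifted bits with the N bits
   carried over from the high ("outof") word via an opposite-direction
   shift; the high word uses an ordinary word shift.  Types and names are
   assumptions chosen for the example.  */

static inline unsigned long long
example_subword_lshr (unsigned int outof, unsigned int into, unsigned int n)
{
  unsigned int carries = outof << (32 - n);	/* Opposite-direction shift.  */
  unsigned int new_into = (into >> n) | carries;
  unsigned int new_outof = outof >> n;		/* Standard word shift.  */
  return ((unsigned long long) new_outof << 32) | new_into;
}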
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			 _______________________
			[__op0_high_|__op0_low__]
			 _______________________
	*		[__op1_high_|__op1_low__]
	_______________________________________________
			 _______________________
    (1)			[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	 _______________________
    (3)	[__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
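
/* Illustrative sketch (not part of the compiler): the three-multiply
   decomposition above, written for a 64-bit product built from 32-bit
   words with an unsigned widening multiply available (the UMULP case, so
   no sign adjustment is needed).  Partial product (3) and the high halves
   of (2a)/(2b) never influence the low 64 bits, so (2a) and (2b) are plain
   non-widening multiplies.  Types and names are assumptions chosen for the
   example.  */

static inline unsigned long long
example_doubleword_mult (unsigned long long op0, unsigned long long op1)
{
  unsigned int op0_low = (unsigned int) op0;
  unsigned int op0_high = (unsigned int) (op0 >> 32);
  unsigned int op1_low = (unsigned int) op1;
  unsigned int op1_high = (unsigned int) (op1 >> 32);

  /* (1): unsigned widening multiply of the low words.  */
  unsigned long long product = (unsigned long long) op0_low * op1_low;

  /* (2a) + (2b): non-widening multiplies feeding the high word only.  */
  unsigned int adjust = op0_high * op1_low + op1_high * op0_low;

  return product + ((unsigned long long) adjust << 32);
}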
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
						      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
				      widened_mode (mode, op0, op1), 1)
	    != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
				    unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
	newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
					     : smul_widen_optab),
				  GET_MODE_2XWIDER_MODE (mode), mode)
	  != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  machine_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (inner_mode)
		  < GET_MODE_BITSIZE (GET_MODE (op1))))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 GET_MODE_WIDER_MODE (wider_mode),
						 mode, 0)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target. Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	    }
	  else
	    {
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);
	    }

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LT : GT),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LT : GT),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx_insn *temp = emit_move_insn (target, xtarget);

	      set_dst_reg_note (temp, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						mode, copy_rtx (xop0),
						copy_rtx (xop1)),
				target);
	    }
	  else
	    target = xtarget;

	  return target;
	}

      else
	delete_insns_since (last);
    }

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && widening_optab_handler (smul_widen_optab, mode, word_mode)
	     != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = emit_move_insn (target ? target : product, product);
	      set_dst_reg_note (temp,
				REG_EQUAL,
				gen_rtx_fmt_ee (MULT, mode,
						copy_rtx (op0),
						copy_rtx (op1)),
				target ? target : product);
	    }
	  return product;
	}
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
	      != CODE_FOR_nothing
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */

rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);
  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
        return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
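
/* For instance, assuming a divmod-style SImode libcall that follows
   this convention (a hypothetical illustration, not a statement about
   any particular runtime routine): the two values come back packed
   into one DImode result, and the simplify_gen_subreg above peels off
   either the subword at offset 0 or the one at GET_MODE_SIZE (SImode),
   depending on which of TARG0/TARG1 was requested.  */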

/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}

/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
static rtx
widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode,
                                    unoptab != clrsb_optab, false);
              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unoptab != clrsb_optab);
              if (temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
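
/* Worked example of the subtraction above, with QImode widened to
   SImode: for x = 0x10, zero extension gives 0x00000010, whose SImode
   clz is 27; subtracting 32 - 8 = 24 yields 3, which is indeed the
   QImode clz of 0x10.  */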

/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
                           word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;

  temp = expand_binop (word_mode, add_optab, temp,
                       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
                       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
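
/* Worked example, assuming a 32-bit word_mode and a DImode input: for
   op0 = 0x0000000100000000 the high word is nonzero, so the result is
   clz (0x00000001) = 31 in word_mode; for op0 = 1 the hi0 branch runs
   and the result is clz (1) + 32 = 63, the DImode clz.  */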

/* Try calculating
        (bswap:narrow x)
   as
        (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
static rtx
widen_bswap (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx x;
  rtx_insn *last;

  if (!CLASS_HAS_WIDER_MODES_P (mclass))
    return NULL_RTX;

  for (wider_mode = GET_MODE_WIDER_MODE (mode);
       wider_mode != VOIDmode;
       wider_mode = GET_MODE_WIDER_MODE (wider_mode))
    if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
      goto found;
  return NULL_RTX;

 found:
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
              && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
                      GET_MODE_BITSIZE (wider_mode)
                      - GET_MODE_BITSIZE (mode),
                      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
        target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
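
/* Worked example, with HImode widened to SImode: for x = 0xaabb, the
   zero-extended value 0x0000aabb byte-swapped in SImode is 0xbbaa0000;
   shifting right by 32 - 16 = 16 bits leaves 0xbbaa, which is
   bswap:HI (0xaabb).  */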

/* Try calculating bswap as two bswaps of two word-sized operands.  */

static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
                    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);

  emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
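
/* Worked example, assuming 32-bit words and a DImode operand
   0x0123456789abcdef: byte-swapping each word gives 0x67452301 and
   0xefcdab89, and exchanging the two words while storing them yields
   0xefcdab8967452301, the full doubleword byte swap.  */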

/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
static rtx
expand_parity (machine_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
            {
              rtx xop0, temp;
              rtx_insn *last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}
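
/* Worked example: for x = 0xb (three bits set), popcount (x) = 3 and
   3 & 1 = 1, the parity of x; any even population count masks to 0.  */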

/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is the mode
   width K + 1, the result of this code sequence will be -1; expand_ffs,
   below, relies on this.  It might be nice to have it be K instead, for
   consistency with the (very few) processors that provide a ctz with a
   defined value, but that would take one more instruction, and it would
   be less convenient for expand_ffs anyway.)  */

static rtx
expand_ctz (machine_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
                         true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
                         gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
                         temp, target,
                         true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
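
/* Worked example in a 32-bit mode (K = 31): for x = 0x18 = 0b11000,
   x & -x = 0b1000 isolates the lowest set bit, clz (0b1000) = 28, and
   31 - 28 = 3 = ctz (0x18).  */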

/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
static rtx
expand_ffs (machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
        goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
        goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
        {
          defined_at_zero = true;
          val = (GET_MODE_PRECISION (mode) - 1) - val;
        }
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
         on some processors (eg Alpha) where ctz(0:mode) ==
         bitsize(mode).  If someone can think of a way to send N to -1
         and leave alone all values in the range 0..N-1 (where N is a
         power of two), cheaper than this test-and-branch, please add it.

         The test-and-branch is done after the operation itself, in case
         the operation sets condition codes that can be recycled for this.
         (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
                               mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
                       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
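
/* Worked example in a 32-bit mode: ffs (0x18) must be 4, one plus the
   index of the least significant set bit, and ffs (0) must be 0.  Via
   the ctz path, ctz (0x18) = 3 and 3 + 1 = 4; at zero, the
   test-and-branch (or a ctz sequence yielding -1 at zero) supplies
   -1 + 1 = 0.  */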

/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
                           machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}

/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */

static rtx
expand_absneg_bit (enum rtx_code code, machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_wide_int_const (mask, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_wide_int_const (mask, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
                        gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
                        target);
    }

  return target;
}
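
/* Worked example, assuming IEEE single precision viewed in SImode with
   the sign at bit 31, so mask = 0x80000000: -2.0f has bit pattern
   0xc0000000; ABS ands with ~mask to give 0x40000000 (2.0f), while NEG
   xors with mask, toggling 0x40000000 and 0xc0000000 in either
   direction.  */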

/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
                    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
        {
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, ops[0].value,
                                   optab_to_code (unoptab),
                                   ops[1].value, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return ops[0].value;
        }
    }
  return 0;
}

/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_clz (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
        return temp;
      goto try_libcall;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
         or ROTATERT.  First try these directly; if this fails, then try the
         obvious pair of shifts with allowed widening, as this will probably
         be always more efficient than the other fallback methods.  */
      if (mode == HImode)
        {
          rtx_insn *last;
          rtx temp1, temp2;

          if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
            {
              temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
                                   unsignedp, OPTAB_DIRECT);
              if (temp)
                return temp;
            }

          last = get_last_insn ();

          temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
                                unsignedp, OPTAB_WIDEN);
          if (temp1 && temp2)
            {
              temp = expand_binop (mode, ior_optab, temp1, temp2, target,
                                   unsignedp, OPTAB_WIDEN);
              if (temp)
                return temp;
            }

          delete_insns_since (last);
        }

      temp = widen_bswap (mode, op0, target);
      if (temp)
        return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
          && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
        {
          temp = expand_doubleword_bswap (mode, op0, target);
          if (temp)
            return temp;
        }

      goto try_libcall;
    }

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
          {
            rtx xop0 = op0;
            rtx_insn *last = get_last_insn ();

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && mclass == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
        return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == clrsb_optab || unoptab == popcount_optab
          || unoptab == parity_optab)
        outmode
          = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
                                          optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
        eq_value = NULL_RTX;
      else
        {
          eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
          if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
          else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
            eq_value = simplify_gen_unary (ZERO_EXTEND,
                                           outmode, eq_value, mode);
        }
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
              || optab_libfunc (unoptab, wider_mode))
            {
              rtx xop0 = op0;
              rtx_insn *last = get_last_insn ();

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */
              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab
                                     || unoptab == bswap_optab)
                                    && mclass == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  Similarly for clrsb.  */
              if ((unoptab == clz_optab || unoptab == clrsb_optab)
                  && temp != 0)
                temp = expand_binop
                  (wider_mode, sub_optab, temp,
                   gen_int_mode (GET_MODE_PRECISION (wider_mode)
                                 - GET_MODE_PRECISION (mode),
                                 wider_mode),
                   target, true, OPTAB_DIRECT);

              /* Likewise for bswap.  */
              if (unoptab == bswap_optab && temp != 0)
                {
                  gcc_assert (GET_MODE_PRECISION (wider_mode)
                              == GET_MODE_BITSIZE (wider_mode)
                              && GET_MODE_PRECISION (mode)
                                 == GET_MODE_BITSIZE (mode));

                  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
                                       GET_MODE_BITSIZE (wider_mode)
                                       - GET_MODE_BITSIZE (mode),
                                       NULL_RTX, true);
                }

              if (temp)
                {
                  if (mclass != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}

/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                          op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}
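
/* Worked example of the branch-free sequence in a 32-bit mode: for
   x = -5, the arithmetic shift gives e = x >> 31 = -1 (all ones), so
   (e ^ x) = 4 and 4 - e = 5.  For x >= 0, e = 0 and the sequence is
   the identity.  */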

rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source and this is a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL, op1, -1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}

/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */

rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
                      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   GET_MODE_PRECISION (mode) - 1,
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}

/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
        {
          imode = int_mode_for_mode (mode);
          if (imode == BLKmode)
            return NULL_RTX;
          op1 = gen_lowpart (imode, op1);
        }
      else
        {
          int word;

          imode = word_mode;
          if (FLOAT_WORDS_BIG_ENDIAN)
            word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
          else
            word = bitpos / BITS_PER_WORD;
          bitpos = bitpos % BITS_PER_WORD;
          op1 = operand_subword_force (op1, word, mode);
        }

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
                           immed_wide_int_const (mask, imode),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;

      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}

/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              if (!op0_is_abs)
                op0_piece
                  = expand_binop (imode, and_optab, op0_piece,
                                  immed_wide_int_const (~mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);
              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_wide_int_const (mask, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_wide_int_const (mask, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_wide_int_const (~mask, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
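
/* Worked example, assuming IEEE single precision viewed in SImode with
   the sign bit at position 31 (mask = 0x80000000): copysign (2.0f, -3.0f)
   computes (0x40000000 & ~mask) | (0xc0400000 & mask), i.e.
   0x40000000 | 0x80000000 = 0xc0000000, the bit pattern of -2.0f.  */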

/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
                       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
          || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
              && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
                                     fmt->signbit_ro, op0_is_abs);
      if (temp)
        return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
                              fmt->signbit_rw, op0_is_abs);
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
                      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}

struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};

/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
           || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
           || reg_used_between_p (dest, p->first, p->insn)
           /* Likewise if this insn depends on a register set by a previous
              insn in the list, or if it sets a result (presumably a hard
              register) that is set or clobbered by a previous insn.
              N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
              SET_DEST perform the former check on the address, and the latter
              check on the MEM.  */
           || (GET_CODE (set) == SET
               && (modified_in_p (SET_SRC (set), p->first)
                   || modified_in_p (SET_DEST (set), p->first)
                   || modified_between_p (SET_SRC (set), p->first, p->insn)
                   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}

/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */

static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
                      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
            if (note)
              {
                int lp_nr = INTVAL (XEXP (note, 0));
                if (lp_nr == 0 || lp_nr == INT_MIN)
                  remove_note (insn, note);
              }
          }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
         reg note to indicate that this call cannot throw or execute a nonlocal
         goto (unless there is already a REG_EH_REGION note, in which case
         we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (CALL_P (insn))
          make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        {
          struct no_conflict_data data;

          data.target = const0_rtx;
          data.first = insns;
          data.insn = insn;
          data.must_stay = 0;
          note_stores (PATTERN (insn), no_conflict_move_test, &data);
          if (! data.must_stay)
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = next;
              else
                insns = next;

              if (next)
                SET_PREV_INSN (next) = PREV_INSN (insn);

              add_insn (insn);
            }
        }

      /* Some ports use a loop to copy large arguments onto the stack.
         Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
        break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
                        target, result, equiv, false);
}

/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, machine_mode mode,
               enum can_compare_purpose purpose)
{
  rtx test;
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
          && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        return 1;
      if (purpose == ccp_store_flag
          && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
          && insn_operand_matches (icode, 1, test))
        return 1;
      if (purpose == ccp_cmov
          && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
        return 1;

      mode = GET_MODE_WIDER_MODE (mode);
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}

/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
                  int unsignedp, enum optab_methods methods,
                  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
              || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

#if HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
        = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
         or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           cmp_mode != VOIDmode;
           cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
        {
          cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
          if (cmp_code == CODE_FOR_nothing)
            continue;

          /* Must make sure the size fits the insn's mode.  */
          if ((CONST_INT_P (size)
               && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
              || (GET_MODE_BITSIZE (GET_MODE (size))
                  > GET_MODE_BITSIZE (cmp_mode)))
            continue;

          result_mode = insn_data[cmp_code].operand[0].mode;
          result = gen_reg_rtx (result_mode);
          size = convert_to_mode (cmp_mode, size, 1);
          emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
          *pmode = result_mode;
          return;
        }

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
        goto fail;

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
                              TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE,
                                        result_mode, 3,
                                        XEXP (x, 0), Pmode,
                                        XEXP (y, 0), Pmode,
                                        size, cmp_mode);
      x = result;
      y = const0_rtx;
      mode = result_mode;
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
        x = force_reg (mode, x);
      if (may_trap_p (y))
        y = force_reg (mode, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
                  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
          && insn_operand_matches (icode, 0, test))
        {
          rtx_insn *last = get_last_insn ();
          rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
          rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
          if (op0 && op1
              && insn_operand_matches (icode, 1, op0)
              && insn_operand_matches (icode, 2, op1))
            {
              XEXP (test, 0) = op0;
              XEXP (test, 1) = op1;
              *ptest = test;
              *pmode = cmp_mode;
              return;
            }
          delete_insns_since (last);
        }

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
        break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
         comparison routine, use that.  */
      if (unsignedp)
        {
          rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
          if (ulibfunc)
            libfunc = ulibfunc;
        }

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                        ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
         return 0/1/2, and unbiased routines return -1/0/1.  Other parts
         of gcc expect that the comparison operation is equivalent
         to the modified comparison.  For signed comparisons compare the
         result against 1 in the biased case, and zero in the unbiased
         case.  For unsigned comparisons always compare against 1 after
         biasing the unbiased result by adding 1.  This gives us a way to
         represent LTU.

         The comparisons in the fixed-point helper library are always
         biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
        {
          if (unsignedp)
            x = plus_constant (ret_mode, result, 1);
          else
            y = const0_rtx;
        }

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
                        ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
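
/* Worked example of the bias handling above: a biased routine in the
   style of libgcc's __cmpdi2 returns 0 (less), 1 (equal) or 2
   (greater), so comparing its result against 1 with the original
   operator reproduces the test.  An unbiased routine returns -1/0/1;
   signed uses compare that against 0 instead, while unsigned uses
   first add 1, mapping -1/0/1 to 0/1/2 so that LTU and GTU against 1
   are meaningful.  */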

/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
                 machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
      if (reload_completed)
        return NULL_RTX;
      if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
        return NULL_RTX;
      x = copy_to_mode_reg (op_mode, x);
    }

  return x;
}

/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
                                          XEXP (test, 1), label));
  if (prob != -1
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_int_reg_note (insn, REG_BR_PROB, prob);
}

/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, rtx label,
                         int prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      std::swap (op0, op1);
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
                    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}

/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
                       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if (code_to_optab (comparison)
          && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
        break;

      if (code_to_optab (swapped)
          && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
        {
          std::swap (x, y);
          comparison = swapped;
          break;
        }

      if (code_to_optab (reversed)
          && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}

/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      struct expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  saved_pending_stack_adjust save;
  save_pending_stack_adjust (&save);
  last = get_last_insn ();
  do_pending_stack_adjust ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }

  delete_insns_since (last);
  restore_pending_stack_adjust (&save);
  return NULL_RTX;
}
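/* Usage sketch (illustrative, not from the original sources): a caller
   expanding "r = a < b ? c : d" in DImode might write

     rtx r = emit_conditional_move (NULL_RTX, LT, a, b, DImode,
				    c, d, DImode, 0);
     if (r == NULL_RTX)
       ... fall back to a compare, a branch and two moves ...

   A NULL result only means the target has no usable movcc pattern for
   this mode; nothing has been emitted in that case.  */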
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate and return an insn body to add Y and Z,
   storing the result in X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}
/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}
/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}
/* Generate and return an insn body to subtract c from r1,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}
int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
		 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp && can_do_signed)
    {
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	  break;

      if (fmode == VOIDmode)
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = GET_MODE (to);

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (GET_MODE (from)))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = GET_MODE (from);
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (targetm.gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       0, label);

      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   const_double_from_real_value (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
  {
    rtx libfunc;
    rtx_insn *insns;
    rtx value;
    convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

    if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
      from = convert_to_mode (SImode, from, unsignedp);

    libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
    gcc_assert (libfunc);

    start_sequence ();

    value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				     GET_MODE (to), 1, from,
				     GET_MODE (from));
    insns = get_insns ();
    end_sequence ();

    emit_libcall_block (insns, target, value,
			gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
				       GET_MODE (to), from));
  }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
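/* Illustrative C analogue (not emitted code) of the unsigned fixup paths
   above, assuming a 64-bit source and only signed hardware conversions:

     double u64_to_double (unsigned long long x)
     {
       if ((long long) x >= 0)			// sign bit clear
	 return (double) (long long) x;
       // Sign bit set: halve, keeping the lost bit "sticky" so the final
       // rounding is still correct, then double the converted result.
       double d = (double) (long long) ((x >> 1) | (x & 1));
       return d + d;
     }

   The 2**bitwidth path instead converts as signed and adds
   18446744073709551616.0 when the input was negative as a signed value.  */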
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
	  && (!DECIMAL_FLOAT_MODE_P (fmode)
	      || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit;
	  rtx_code_label *lab1, *lab2;
	  rtx_insn *insn;

	  bitsize = GET_MODE_PRECISION (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1, fmode);
	  limit = const_double_from_real_value (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (targetm.gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_dst_reg_note (insn, REG_EQUAL,
				gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
					       copy_rtx (from)),
				to);
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
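/* Illustrative C analogue (not emitted code) of the unsigned fixup above
   for a 64-bit destination:

     unsigned long long double_to_u64 (double d)
     {
       if (d < 9223372036854775808.0)		// d < 2^63: hot path
	 return (long long) d;
       // 2^63 <= d < 2^64: subtracting 2^63 merely clears that bit, so
       // no rounding occurs; convert signed, then XOR bit 63 back in.
       return (unsigned long long) (long long) (d - 9223372036854775808.0)
	      ^ (1ULL << 63);
     }
*/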
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
		delete_insns_since (last);
		continue;
	      }
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
	  && (optab_handler (code_to_optab (code), mode)
	      != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
	if (l)
	  {
	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
	    fprintf (stderr, "%s\t%s:\t%s\n",
		     GET_RTX_NAME (optab_to_code ((optab) i)),
		     GET_MODE_NAME (j),
		     XSTR (l, 0));
	  }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
	{
	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
					 (machine_mode) k);
	  if (l)
	    {
	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
		       GET_RTX_NAME (optab_to_code ((optab) i)),
		       GET_MODE_NAME (j),
		       GET_MODE_NAME (k),
		       XSTR (l, 0));
	    }
	}
}
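/* For example (illustrative output only), a typical line printed by the
   first loop looks like

     plus	SF:	__addsf3

   i.e. the RTX name of the optab's code, the mode name, and the
   SYMBOL_REF's name, separated by tabs as in the fprintf formats above.  */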
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL_RTX;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;
    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return comparison rtx for COND.  Use UNSIGNEDP to select signed or
   unsigned operators.  Do not generate compare instruction.  */

static rtx
vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
		    bool unsignedp, enum insn_code icode)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
/* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
   vec_perm operand, assuming the second operand is a constant vector of zeroes.
   Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
   shift.  */

static rtx
shift_amt_for_vec_perm_mask (rtx sel)
{
  unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));

  if (GET_CODE (sel) != CONST_VECTOR)
    return NULL_RTX;

  first = INTVAL (CONST_VECTOR_ELT (sel, 0));
  if (first >= 2*nelt)
    return NULL_RTX;
  for (i = 1; i < nelt; i++)
    {
      int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
      unsigned int expected = (i + first) & (2 * nelt - 1);
      /* Indices into the second vector are all equivalent.  */
      if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
	return NULL_RTX;
    }

  return GEN_INT (first * bitsize);
}
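/* For example, in V8QImode the constant selector {1,2,3,4,5,6,7,8} with a
   zero second operand selects elements 1..7 of the first vector followed
   by one element of the (zero) second vector, i.e. a whole-vector shift by
   one 8-bit element; the function returns GEN_INT (8) for it.  */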
/* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
	v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      /* See if this can be handled with a vec_shr.  We only do this if the
	 second vector is all zeroes.  */
      enum insn_code shift_code = optab_handler (vec_shr_optab, GET_MODE (v0));
      if (v1 == CONST0_RTX (GET_MODE (v1)) && shift_code)
	if (rtx shift_amt = shift_amt_for_vec_perm_mask (sel))
	  {
	    create_convert_operand_from_type (&ops[2], shift_amt,
					      sizetype_tab[(int) stk_sizetype]);
	    if (maybe_expand_insn (shift_code, 3, ops))
	      return ops[0].value;
	  }
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Generate instructions for vec_perm optab given its mode
   and three operands.  */

rtx
expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
	      tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   NULL, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   NULL, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte of each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
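/* Worked example of the byte-based lowering above (illustrative): a
   V4SImode permutation with a variable selector becomes a V16QImode
   permutation whose selector is built as

     sel_qi[i] = (sel[i / 4] * 4) broadcast into 4 bytes, plus i % 4

   so a word-selector element of 2 expands into the four byte indices
   {8, 9, 10, 11}.  */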
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
      unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
    }
  else
    {
      gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
      op0a = op0;
      op0b = build_zero_cst (TREE_TYPE (op0));
      tcode = NE_EXPR;
      unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
    }
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));

  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    return 0;

  comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  machine_mode wmode;
  rtx m1, m2, perm;
  optab tab1, tab2;
  rtvec v;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
	std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
				    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
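/* Worked example (illustrative): for V4SImode on a little-endian target
   with even/odd widening multiplies (method 2), M1 holds the widened even
   products and M2 the odd ones, both viewed as V4SI.  The loop above then
   builds the selector {1, 5, 3, 7}, which picks the high 32-bit half of
   each 64-bit product in element order.  */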
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label, 0);
  return true;
}
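/* Illustrative C analogue of the generated loop, written with GCC's own
   builtins (assumptions: T is the mode's integer type, OP stands for the
   operation SEQ computes):

     T old = *mem;			// single plain load
     T new_val;
     do
       new_val = OP (old);		// 'old' is refreshed on failure
     while (!__atomic_compare_exchange_n (mem, &old, new_val, false,
					  __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
*/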
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
	return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, 2, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  rtx ret;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, 3, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
/* Generate asm volatile("" : : : "memory") as the memory barrier.  */

static void
expand_asm_memory_barrier (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
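/* The emitted RTL has the shape (illustrative):

     (parallel [(asm_operands ("") ("") 0 [] [] [])
		(clobber (mem:BLK (scratch)))])

   which is how a compiler-level memory barrier is represented: the
   volatile asm may not be moved, and the BLKmode clobber tells alias
   analysis that all memory may change across it.  */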
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (targetm.have_mem_thread_fence ())
    emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      if (targetm.have_memory_barrier ())
	emit_insn (targetm.gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
	emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
	expand_asm_memory_barrier ();
    }
}
/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  if (targetm.have_mem_signal_fence ())
    emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
	 handler running on the same thread.  Thus this really becomes a
	 compiler barrier, in that stores must not be sunk past
	 (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* Issue val = compare_and_swap (mem, 0, 0).
	 This may cause the occasional harmless store of 0 when the value is
	 already 0, but it seems to be OK according to the standards guys.  */
      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
					  const0_rtx, false, model, model))
	return target;

      /* Otherwise there is no atomic load, leave the library call.  */
      return NULL_RTX;
    }

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
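/* Illustrative note: the compare-and-swap trick above corresponds to

     T val = 0;
     __atomic_compare_exchange_n (mem, &val, 0, false, model, model);
     // 'val' now holds the atomically read contents of *mem

   i.e. a CAS of 0 against 0 never changes memory observably but returns
   the old value atomically, which is exactly a load.  */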
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  Try a mem_exchange and throw away
     the result.  If that doesn't work, don't do anything.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
      if (!target)
	target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
							    val);
      if (target)
	return const0_rtx;
      else
	return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */

  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
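
/* For example, after get_atomic_op_for_code (&optab, PLUS), the fields
   name the add family: optab.mem_fetch_before == atomic_fetch_add_optab,
   optab.fetch_after == sync_new_add_optab, and optab.reverse_code == MINUS,
   which lets a missing fetch_add be recovered from add_fetch by
   subtracting VAL again (see the compensation code below).  */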
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                         enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
        {
          if (target == const0_rtx)
            target = gen_reg_rtx (GET_MODE (mem));
          return maybe_emit_atomic_exchange (target, mem, val, model);
        }
    }

  return NULL_RTX;
}
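
/* Why the two rewrites above are safe: x & 0 == 0 and x | -1 == -1 for
   every x, so the value stored never depends on the old contents, and an
   atomic exchange of the constant yields the same memory state while
   returning the old value.  The "after" value would simply be the
   constant itself, which is why AFTER must be false unless the result
   is unused.  */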
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
               rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (optab->mem_no_result, mode);
          create_integer_operand (&ops[2], model);
          num_ops = 3;
        }
      else
        {
          icode = direct_optab_handler (optab->no_result, mode);
          num_ops = 2;
        }
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
        {
          icode = direct_optab_handler (after ? optab->mem_fetch_after
                                        : optab->mem_fetch_before, mode);
          create_integer_operand (&ops[3], model);
          num_ops = 4;
        }
      else
        {
          icode = optab_handler (after ? optab->fetch_after
                                 : optab->fetch_before, mode);
          num_ops = 3;
        }
      create_output_operand (&ops[op_counter++], target, mode);
    }

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
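
/* Operand layout example (illustrative): for __atomic_fetch_add expanded
   via the "atomic_fetch_add<mode>" pattern, this function is entered with
   USE_MEMMODEL true and AFTER false, and builds

     ops[0] = output (the value fetched), ops[1] = MEM (fixed),
     ops[2] = VAL (converted to MODE),    ops[3] = memory model constant

   before handing all four to maybe_expand_insn.  */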
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
                                    enum rtx_code code, enum memmodel model,
                                    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
        result = maybe_emit_op (&optab, target, mem, val, false, model,
                                !after);

      if (result)
        {
          /* If the result isn't used, no need to do compensation code.  */
          if (unused_result)
            return result;

          /* Issue compensation code.  Fetch_after == fetch_before OP val.
             Fetch_before == after REVERSE_OP val.  */
          if (!after)
            code = optab.reverse_code;
          if (code == NOT)
            {
              result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
                                            true, OPTAB_LIB_WIDEN);
              result = expand_simple_unop (mode, NOT, result, target, true);
            }
          else
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          return result;
        }
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
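
/* Compensation example (illustrative): if the request is add_fetch
   (AFTER true) but only a fetch-before pattern exists, the code above
   emits the fetch_add and then computes result + VAL to produce the
   "after" value; conversely a missing fetch_add is recovered from
   add_fetch via reverse_code (MINUS).  NAND has no reverse operation,
   hence the explicit AND/NOT pair computing ~(old & VAL).  */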
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
                        enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
                                               after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
                                                   model, after);
      if (result)
        {
          /* PLUS worked so emit the insns and return.  */
          rtx_insn *insn = get_insns ();
          end_sequence ();
          emit_insn (insn);
          return result;
        }

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
                               : optab.fetch_before, mode);
      if (libfunc == NULL
          && (after || unused_result || optab.reverse_code != UNKNOWN))
        {
          fixup = true;
          if (!after)
            code = optab.reverse_code;
          libfunc = optab_libfunc (after ? optab.fetch_before
                                   : optab.fetch_after, mode);
        }
      if (libfunc != NULL)
        {
          rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
                                            2, addr, ptr_mode, val, mode);

          /* If the opposite variant was used, compensate for it here.  */
          if (!unused_result && fixup)
            result = expand_simple_binop (mode, code, result, val, target,
                                          true, OPTAB_LIB_WIDEN);
          if (result)
            return result;
        }

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
          if (!target || !register_operand (target, mode))
            target = gen_reg_rtx (mode);
          /* If fetch_before, copy the value now.  */
          if (!after)
            emit_move_insn (target, t0);
        }
      else
        target = const0_rtx;

      t1 = t0;
      if (code == NOT)
        {
          t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
                                    true, OPTAB_LIB_WIDEN);
          t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
        }
      else
        t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
                                  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  return NULL_RTX;
}
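
/* Negation fallback example (illustrative): on a target providing only a
   fetch_sub pattern, __atomic_fetch_add (&x, N, model) is retried above
   as fetch_sub (&x, -N), since x + N == x - (-N).  Only if that fails do
   the __sync libcall and compare-and-swap loop fallbacks run.  */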
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
          || (insn_data[(int) icode].operand[opno].predicate
              (operand, insn_data[(int) icode].operand[opno].mode)));
}
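
/* E.g. (hypothetical caller): an expander can test
     insn_operand_matches (icode, 1, x)
   to see whether X already satisfies operand 1's predicate before
   spending a pseudo register on a copy.  */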
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i;

  mode = GET_MODE (target);
  for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
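
/* Example (illustrative): on a 32-bit target (word_mode == SImode,
   UNITS_PER_WORD == 4), a DImode TARGET is checked as two SImode
   subwords, i.e. validate_subreg (SImode, DImode, target, 0) and
   validate_subreg (SImode, DImode, target, 4) must both succeed.  */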
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
                                    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
          && !side_effects_p (addr))
        {
          rtx_insn *last;
          machine_mode mode;

          last = get_last_insn ();
          mode = get_address_mode (mem);
          mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
          if (insn_operand_matches (icode, opno, mem))
            {
              op->value = mem;
              return true;
            }
          delete_insns_since (last);
        }
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
                          struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
          && op->value != const0_rtx
          && GET_MODE (op->value) == mode
          && maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
                  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
        return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
        mode = GET_MODE (op->value);
      else
        /* The caller must tell us what mode this value has.  */
        gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
        {
          op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
          mode = imode;
        }
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
        goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
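
/* Example (illustrative): an EXPAND_CONVERT_FROM operand created by
   create_convert_operand_from_type below, for an argument whose mode is
   narrower than what the pattern's operand wants, is widened here with
   convert_modes before the usual EXPAND_INPUT legitimization runs (the
   "goto input" above).  */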
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
                                  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
                               TYPE_UNSIGNED (type));
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
                           unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last;
  unsigned int i;

  last = get_last_insn ();
  for (i = 0; i < nops; i++)
    if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
      {
        delete_insns_since (last);
        return false;
      }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
                              ops[3].value, ops[4].value, ops[5].value,
                              ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
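
/* Putting the pieces together (illustrative sketch; ICODE, TARGET and X
   stand in for a real caller's values):

     struct expand_operand ops[2];
     create_output_operand (&ops[0], target, mode);
     create_input_operand (&ops[1], x, mode);
     if (maybe_expand_insn (icode, 2, ops))
       return ops[0].value;

   maybe_gen_insn legitimizes both operands and generates the pattern;
   maybe_expand_insn (below) emits it, leaving no code behind on failure.  */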
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
                   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
                        struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
             struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}
/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
                  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))