/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "optabs-tree.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUAL note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
			   copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
				  nops	OP0	OP1	WIDE_OP
   widening-sum			  2	oprnd0	-	oprnd1
   widening-dot-product		  3	oprnd0	oprnd1	oprnd2
   widening-mult		  2	oprnd0	oprnd1	-
   type-promotion (vec-unpack)	  1	oprnd0	-	-  */
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
	    || ops->code == VEC_UNPACK_LO_EXPR)
	   && VECTOR_BOOLEAN_TYPE_P (ops->type)
	   && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
	   && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
	   && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
	 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
	 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
	 the pattern number of elements in the wider vector.  */
      widen_pattern_optab
	= (ops->code == VEC_UNPACK_HI_EXPR
	   ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
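
/* As an illustrative sketch (an editorial addition, not original source
   text): a caller expanding a fused multiply-add A * B + C in SFmode,
   assuming the target implements the corresponding fma pattern, could
   write

     rtx r = expand_ternary_op (SFmode, fma_optab, a, b, c,
				NULL_RTX, 0);

   where A, B and C are SFmode rtxes and R may or may not be a fresh
   pseudo register.  */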
rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */
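
/* For illustration (an editorial addition): with two constant operands
   the result folds at expansion time, e.g.

     simplify_expand_binop (SImode, add_optab, GEN_INT (2), GEN_INT (3),
			    NULL_RTX, 0, OPTAB_LIB_WIDEN)

   returns (const_int 5) without emitting any insns, because
   simplify_binary_operation can evaluate PLUS on two CONST_INTs.  */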
rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */
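
/* For example (an editorial illustration, not original source text):
   broadcasting (const_int 5) in QImode to V16QImode yields a
   CONST_VECTOR of sixteen fives with no insns emitted, while
   broadcasting a QImode pseudo goes through vec_duplicate_optab or
   vec_init_optab below and returns NULL if the target supports
   neither.  */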
static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */
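
/* A worked instance (an editorial addition): with 32-bit words, a 64-bit
   logical right shift by a count known to be >= 32 sets INTO_TARGET to
   OUTOF_INPUT >> SUPERWORD_OP1 (the count minus 32) and fills
   OUTOF_TARGET with zeros; for an arithmetic right shift, OUTOF_TARGET
   is instead filled with copies of the sign bit via a shift by
   BITS_PER_WORD - 1.  */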
static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */
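
/* A worked instance (an editorial addition): with 32-bit words, a 64-bit
   logical right shift by OP1 == 5 computes

     INTO_TARGET  = (INTO_INPUT >> 5) | (OUTOF_INPUT << 27)
     OUTOF_TARGET = OUTOF_INPUT >> 5

   where the "<< 27" term is the CARRIES value built with the shift
   opposite in direction to BINOPTAB.  */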
static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */
static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
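
/* An illustrative scenario (an editorial addition, not part of the
   original comment): for a variable 64-bit shift on a 32-bit-word target
   whose word shifts truncate their counts to 5 bits (SHIFT_MASK == 31),
   OP1 & 32 == 0 selects the subword sequence and OP1 & 32 != 0 the
   superword one, and SUPERWORD_OP1 can simply be OP1 because the word
   shift itself masks away the high bit of the count.  */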
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;

      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;

      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label,
			   profile_probability::uninitialized ());

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	 *		      [__op1_high_|__op1_low__]
	 _______________________________________________

			       _______________________
     (1)		      [__op0_low__*__op1_low__]
		   _______________________
     (2a)	  [__op0_low__*__op1_high_]
		   _______________________
     (2b)	  [__op0_high_*__op1_low__]
       _______________________
     (3)      [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
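
/* A worked example of the adjustment (an editorial addition), with
   BITS_PER_WORD == 4 for brevity: let op0_low = 0b1001 (9 unsigned,
   -7 signed) and op1_low = 0b0011 (3).  A signed widening multiply
   computes -7 * 3 = -21, while the desired unsigned product is
   9 * 3 = 27; the difference is exactly op1_low * 2**4 = 48.
   Logically shifting op0_low right by 3 gives 1, and adding that 1 to
   op0_high makes partial product (2b) contribute an extra
   op1_low * 2**4, which is precisely the required correction.  */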
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
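
/* For example (an editorial illustration): expand_simple_binop (SImode,
   PLUS, a, b, target, 0, OPTAB_LIB_WIDEN) behaves exactly like calling
   expand_binop with add_optab, since code_to_optab maps PLUS to
   add_optab.  */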
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different than the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
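
/* An illustrative note (an editorial addition, not original source
   text): on a 32-bit-word target, a DImode addition requested with
   OPTAB_LIB_WIDEN is typically synthesized below word-by-word with
   carry propagation, while a DImode bitwise AND is simply split into
   two independent word_mode ANDs.  */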
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */
  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
	{
	  machine_mode from_mode = widened_mode (mode, op0, op1);
	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
	}
      else
	icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
					target, unsignedp, methods, last);
	  if (temp)
	    return temp;
	}
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
	 one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
	op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab
	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
	op0_mode = int_mode;
      if (op1_mode == VOIDmode)
	op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, op0_mode),
				operand_subword_force (op1, i, op1_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, int_mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

	  outof_input = operand_subword_force (op0, outof_word, int_mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	    {
	      insns = get_insns ();
	      end_sequence ();

	      emit_insn (insns);
	      return target;
	    }
	  end_sequence ();
	}
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || !REG_P (target)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
	{
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  inter = const0_rtx;
	}
      else
	{
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  HOST_WIDE_INT first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	    {
	      first_shift_count = shift_count - BITS_PER_WORD;
	      second_shift_count = 2 * BITS_PER_WORD - shift_count;
	    }
	  else
	    {
	      first_shift_count = BITS_PER_WORD - shift_count;
	      second_shift_count = shift_count;
	    }
	  rtx first_shift_count_rtx
	    = gen_int_shift_amount (word_mode, first_shift_count);
	  rtx second_shift_count_rtx
	    = gen_int_shift_amount (word_mode, second_shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count_rtx,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);
	  else
	    inter = 0;

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count_rtx,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);
	}

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
	target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
	emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
	  rtx op0_piece = operand_subword_force (xop0, index, int_mode);
	  rtx op1_piece = operand_subword_force (xop1, index, int_mode);
	  rtx x;

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);
	  if (x == 0)
	    break;

	  if (i + 1 < nwords)
	    {
	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						  ? LTU : GTU),
						 x, op0_piece,
						 word_mode, 1, normalizep);
	    }

	  if (i > 0)
	    {
	      rtx newx;

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   x, carry_in,
				   NULL_RTX, 1, next_methods);

	      if (i + 1 < nwords)
		{
		  /* Get out carry from adding/subtracting carry in.  */
		  rtx carry_tmp = gen_reg_rtx (word_mode);
		  carry_tmp = emit_store_flag_force (carry_tmp,
						     (binoptab == add_optab
						      ? LTU : GTU),
						     newx, x,
						     word_mode, 1, normalizep);

		  /* Logical-ior the two poss. carry together.  */
		  carry_out = expand_binop (word_mode, ior_optab,
					    carry_out, carry_tmp,
					    carry_out, 0, next_methods);
		  if (carry_out == 0)
		    break;
		}
	      emit_move_insn (target_piece, newx);
	    }
	  else
	    {
	      if (x != target_piece)
		emit_move_insn (target_piece, x);
	    }

	  carry_in = carry_out;
	}

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
	{
	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	    {
	      rtx_insn *temp = emit_move_insn (target, xtarget);

	      set_dst_reg_note (temp, REG_EQUAL,
				gen_rtx_fmt_ee (optab_to_code (binoptab),
						int_mode, copy_rtx (xop0),
						copy_rtx (xop1)),
				target);
	    }
	  else
	    target = xtarget;

	  return target;
	}

      else
	delete_insns_since (last);
    }

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
	  != CODE_FOR_nothing)
	{
	  product = expand_doubleword_mult (int_mode, op0, op1, target,
					    true, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product == NULL_RTX
	  && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
	      != CODE_FOR_nothing))
	{
	  product = expand_doubleword_mult (int_mode, op0, op1, target,
					    false, methods);
	  if (!product)
	    delete_insns_since (last);
	}

      if (product != NULL_RTX)
	{
	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
	    {
	      rtx_insn *move = emit_move_insn (target ? target : product,
					       product);
	      set_dst_reg_note (move,
				REG_EQUAL,
				gen_rtx_fmt_ee (MULT, int_mode,
						copy_rtx (op0),
						copy_rtx (op1)),
				target ? target : product);
	    }
	  return product;
	}
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
	{
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);
	}

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
			    trapv ? NULL_RTX
			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
					      mode, op0, op1), trapv);

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      /* This code doesn't make sense for conversion optabs, since we
	 wouldn't then want to extend the operands to be the same size
	 as the result.  */
      gcc_assert (!convert_optab_p (binoptab));
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode)
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	    {
	      rtx xop0 = op0, xop1 = op1;
	      int no_extend = 0;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && mclass == MODE_INT)
		no_extend = 1;

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);
	      if (temp)
		{
		  if (mclass != MODE_INT
		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.  */
rtx
sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  bool save_enable;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Disable any direct use of any
     signed insn in the current mode.  */
  save_enable = swap_optab_enable (soptab, mode, false);

  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    goto egress;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    goto egress;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, soptab, op0, op1, target,
		       unsignedp, methods);
  if (!temp && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);

 egress:
  /* Undo the fiddling above.  */
  if (save_enable)
    swap_optab_enable (soptab, mode, true);

  return temp;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */
int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      enum insn_code icode = optab_handler (binoptab, mode);
      machine_mode mode0 = insn_data[icode].operand[1].mode;
      machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx xop0 = op0, xop1 = op1;

      /* If we are optimizing, force expensive constants into a register.  */
      xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
      xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);

      create_fixed_operand (&ops[0], targ0);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      create_convert_operand_from (&ops[2], op1, mode, unsignedp);
      create_fixed_operand (&ops[3], targ1);
      if (maybe_expand_insn (icode, 4, ops))
	return 1;
      delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  delete_insns_since (entry_last);
  return 0;
}
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.  */
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
		    rtx target, int unsignedp)
{
  optab unop = code_to_optab (code);
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}
/* Try calculating
	(clz:narrow x)
   as
	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).

   A similar operation can be used for clrsb.  UNOPTAB says which operation
   we are trying to expand.  */
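/* For example, an 8-bit clz done via a 32-bit clz instruction:
   for x = 0x10, clz32 (zero_extend (x)) = 27 and 27 - (32 - 8) = 3,
   which is clz8 (0x10).  */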
static rtx
widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
{
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0)
	    target = gen_reg_rtx (mode);
	  xop0 = widen_operand (op0, wider_mode, mode,
				unoptab != clrsb_optab, false);
	  temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
			      unoptab != clrsb_optab);
	  if (temp != 0)
	    temp = expand_binop
	      (wider_mode, sub_optab, temp,
	       gen_int_mode (GET_MODE_PRECISION (wider_mode)
			     - GET_MODE_PRECISION (mode),
			     wider_mode),
	       target, true, OPTAB_DIRECT);
	  if (temp == 0)
	    delete_insns_since (last);

	  return temp;
	}
    }
  return 0;
}
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
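/* For example, with 32-bit words, clz64 (0x0000000000f00000) takes the
   high-word-zero branch and yields 32 + clz32 (0x00f00000) = 32 + 8 = 40,
   while clz64 (0x00f0000000000000) takes the other branch and yields
   clz32 (0x00f00000) = 8 directly.  */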
static rtx
expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (targetm.gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  end_sequence ();
  return 0;
}
/* Try calculating popcount of a double-word quantity as two popcount's of
   word-sized quantities and summing up the results.  */
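/* For example, with 32-bit words, popcount64 (0x00000003000000ff)
   = popcount32 (0x00000003) + popcount32 (0x000000ff) = 2 + 8 = 10.  */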
static rtx
expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t0, t1, t;
  rtx_insn *seq;

  start_sequence ();

  t0 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 0, mode), NULL_RTX,
			   true);
  t1 = expand_unop_direct (word_mode, popcount_optab,
			   operand_subword_force (op0, 1, mode), NULL_RTX,
			   true);
  if (!t0 || !t1)
    {
      end_sequence ();
      return NULL_RTX;
    }

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_popcount* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, t, POPCOUNT, op0, 0);
  emit_insn (seq);
  return t;
}
/* Try calculating
	(parity:wide x)
   as
	(parity:narrow (low (x) ^ high (x))).  */
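/* This works because parity is the XOR of all the bits, and XOR is
   associative and commutative: e.g. parity64 (0x0000000100000003)
   = parity32 (0x00000001 ^ 0x00000003) = parity32 (0x00000002) = 1.  */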
static rtx
expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx t = expand_binop (word_mode, xor_optab,
			operand_subword_force (op0, 0, mode),
			operand_subword_force (op0, 1, mode),
			NULL_RTX, 0, OPTAB_DIRECT);
  return expand_unop (word_mode, parity_optab, t, target, true);
}
/* Try calculating
	(bswap:narrow x)
   as
	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
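/* For example, a 16-bit bswap done via a 32-bit bswap instruction:
   x = 0x1234 zero-extends to 0x00001234, bswap32 gives 0x34120000, and
   shifting right by 32 - 16 = 16 leaves the desired result 0x3412.  */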
static rtx
widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx x;
  rtx_insn *last;
  opt_scalar_int_mode wider_mode_iter;

  FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
    if (optab_handler (bswap_optab, wider_mode_iter.require ())
	!= CODE_FOR_nothing)
      break;

  if (!wider_mode_iter.exists ())
    return NULL_RTX;

  scalar_int_mode wider_mode = wider_mode_iter.require ();
  last = get_last_insn ();

  x = widen_operand (op0, wider_mode, mode, true, true);
  x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);

  gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
  if (x != 0)
    x = expand_shift (RSHIFT_EXPR, wider_mode, x,
		      GET_MODE_BITSIZE (wider_mode)
		      - GET_MODE_BITSIZE (mode),
		      NULL_RTX, true);

  if (x != 0)
    {
      if (target == 0)
	target = gen_reg_rtx (mode);
      emit_move_insn (target, gen_lowpart (mode, x));
    }
  else
    delete_insns_since (last);

  return target;
}
/* Try calculating bswap as two bswaps of two word-sized operands.  */
static rtx
expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
{
  rtx t0, t1;

  t1 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 0, mode), NULL_RTX, true);
  t0 = expand_unop (word_mode, bswap_optab,
		    operand_subword_force (op, 1, mode), NULL_RTX, true);

  if (target == 0 || !valid_multiword_target_p (target))
    target = gen_reg_rtx (mode);

  emit_clobber (target);
  emit_move_insn (operand_subword (target, 0, 1, mode), t0);
  emit_move_insn (operand_subword (target, 1, 1, mode), t1);

  return target;
}
/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.  */
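/* The low bit of the population count is the parity:
   e.g. parity (0x1c) = popcount (0x1c) & 1 = 3 & 1 = 1.  */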
static rtx
expand_parity (scalar_int_mode mode, rtx op0, rtx target)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  opt_scalar_int_mode wider_mode_iter;
  FOR_EACH_MODE_FROM (wider_mode_iter, mode)
    {
      scalar_int_mode wider_mode = wider_mode_iter.require ();
      if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
	{
	  rtx xop0, temp;
	  rtx_insn *last;

	  last = get_last_insn ();

	  if (target == 0 || GET_MODE (target) != wider_mode)
	    target = gen_reg_rtx (wider_mode);

	  xop0 = widen_operand (op0, wider_mode, mode, true, false);
	  temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
			      true);
	  if (temp != 0)
	    temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				 target, true, OPTAB_DIRECT);

	  if (temp)
	    {
	      if (mclass != MODE_INT
		  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		return convert_to_mode (mode, temp, 0);
	      else
		return gen_lowpart (mode, temp);
	    }
	  else
	    delete_insns_since (last);
	}
    }
  return 0;
}
/* Try calculating ctz(x) as K - clz(x & -x),
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.)  */
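/* For example, in an 8-bit mode (K = 7): for x = 0x14, x & -x = 0x04,
   clz (0x04) = 5, and K - 5 = 2 = ctz (0x14).  */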
static rtx
expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
{
  rtx_insn *seq;
  rtx temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  if (temp)
    temp = expand_binop (mode, sub_optab,
			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
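/* The identity used below is ffs (x) = ctz (x) + 1 for x != 0
   (e.g. ffs (0x14) = 2 + 1 = 3), with a correction, or a test and
   branch, to force the result to 0 when x == 0.  */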
static rtx
expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp;
  rtx_insn *seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx_code_label *nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  end_sequence ();
  return 0;
}
/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */
static rtx
lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
			   machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  */
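/* For IEEE binary32, for instance, the sign bit is bit 31, so with
   mask = 0x80000000 this computes abs as (x & 0x7fffffff) and neg as
   (x ^ 0x80000000), treating the bits of the value as an integer.  */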
static rtx
expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  scalar_int_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx_insn *pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  scalar_float_mode float_mode;
  rtx temp;
  rtx libfunc;

  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_clz (int_mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_leading (int_mode, op0, target, unoptab);
	  if (temp)
	    return temp;
	}
      goto try_libcall;
    }

  if (unoptab == popcount_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_popcount (int_mode, op0, target);
      if (temp)
	return temp;
    }

  if (unoptab == parity_optab
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
      && optimize_insn_for_speed_p ())
    {
      temp = expand_doubleword_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
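      /* Rotating a 16-bit value by 8 in either direction swaps its two
	 bytes, e.g. 0x1234 rotated by 8 is 0x3412, which is exactly
	 bswap:HI.  */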
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0,
				   gen_int_shift_amount (mode, 8),
				   target, unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  temp1 = expand_binop (mode, ashl_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0,
				gen_int_shift_amount (mode, 8), NULL_RTX,
				unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  temp = widen_bswap (int_mode, op0, target);
	  if (temp)
	    return temp;

	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	    {
	      temp = expand_doubleword_bswap (mode, op0, target);
	      if (temp)
		return temp;
	    }
	}

      goto try_libcall;
    }
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */
	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }
  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, int_mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }
  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (is_a <scalar_float_mode> (mode, &float_mode))
	{
	  temp = expand_absneg_bit (NEG, float_mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }
  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_parity (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ffs (int_mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
    {
      temp = expand_ctz (int_mode, op0, target);
      if (temp)
	return temp;
    }
 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      bool trapv = trapv_unoptab_p (unoptab);
      if (trapv)
	eq_value = NULL_RTX;
      else
	{
	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
	  if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
	  else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
	    eq_value = simplify_gen_unary (ZERO_EXTEND,
					   outmode, eq_value, mode);
	}
      emit_libcall_block_1 (insns, target, value, eq_value, trapv);

      return target;
    }
  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      FOR_EACH_WIDER_MODE (wider_mode, mode)
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  temp = expand_binop
		    (wider_mode, sub_optab, temp,
		     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
				   - GET_MODE_PRECISION (int_mode),
				   wider_mode),
		     target, true, OPTAB_DIRECT);
		}

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  scalar_int_mode wider_int_mode
		    = as_a <scalar_int_mode> (wider_mode);
		  int_mode = as_a <scalar_int_mode> (mode);
		  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
			      == GET_MODE_BITSIZE (wider_int_mode)
			      && GET_MODE_PRECISION (int_mode)
				 == GET_MODE_BITSIZE (int_mode));

		  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
				       GET_MODE_BITSIZE (wider_int_mode)
				       - GET_MODE_BITSIZE (int_mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }
  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  scalar_float_mode float_mode;
  if (is_a <scalar_float_mode> (mode, &float_mode))
    {
      temp = expand_absneg_bit (ABS, float_mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }
  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */
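  /* E.g. for W = 32 and x = -5: x >> 31 = -1, so (x ^ -1) - (-1)
     = 4 + 1 = 5; for x >= 0 the shift yields 0 and x is unchanged.  */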
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (int_mode,
			     result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same as the source
     and is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL, op1,
			   profile_probability::uninitialized ());

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  */
rtx
expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      delete_insns_since (last);
    }
  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */
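  /* E.g. for W = 32 and x = -5: x >> 31 = -1 and x ^ -1 = ~x = 4;
     for x >= 0 the shift yields 0 and x is returned unchanged.  */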
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
				   GET_MODE_PRECISION (int_mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */
static rtx
expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  if (!int_mode_for_mode (mode).exists (&imode))
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;

      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */
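/* With MASK holding just the sign bit, the result is computed as

	(op0 & ~mask) | (op1 & mask)

   e.g. for IEEE binary32, copysign (2.0f, -1.0f) is
   (0x40000000 & 0x7fffffff) | (0xbf800000 & 0x80000000) = 0xc0000000,
   which is -2.0f.  */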
static rtx
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  scalar_int_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (!int_mode_for_mode (mode).exists (&imode))
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_wide_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */
rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  scalar_float_mode mode;
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  mode = as_a <scalar_float_mode> (GET_MODE (op0));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */
bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx_insn *pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
      && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */
void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}

struct no_conflict_data
{
  rtx target;
  rtx_insn *first, *insn;
  bool must_stay;
};
/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p = (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our job is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.  */
static void
emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx_insn *next, *last, *insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		SET_PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (equiv)
    set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}
void
emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */
int
can_compare_p (enum rtx_code code, machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;

  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode).else_void ();
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in X and Y, using the rtl operator COMPARISON.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   UNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   *PTEST is where the resulting comparison RTX is returned or NULL_RTX
   if we failed to produce one.

   *PMODE is the mode of the inputs (in case they are const_int).

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */
static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

#if HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      opt_scalar_int_mode cmp_mode_iter;
      FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
	{
	  scalar_int_mode cmp_mode = cmp_mode_iter.require ();
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if (CONST_INT_P (size)
	      ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
		 > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function.  */
      result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);

      x = result;
      y = const0_rtx;
      mode = TYPE_MODE (integer_type_node);
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = copy_to_reg (x);
      if (may_trap_p (y))
	y = copy_to_reg (y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
		  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  FOR_EACH_MODE_FROM (cmp_mode, mode)
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
    }

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      /* Small trick if UNORDERED isn't implemented by the hardware.  */
      if (comparison == UNORDERED && rtx_equal_p (x, y))
	{
	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
			    ptest, pmode);
	  if (*ptest)
	    return;
	}

      prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
    }
  else
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, x, mode, y, mode);
      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.

	 The comparisons in the fixed-point helper library are always
	 biased.  */
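      /* For example, with an unbiased routine and an unsigned LTU test:
	 adding 1 maps the -1/0/1 result to 0/1/2, after which
	 (result + 1) LTU 1 holds exactly for the "less than" outcome.  */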
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */
4052 prepare_operand (enum insn_code icode
, rtx x
, int opnum
, machine_mode mode
,
4053 machine_mode wider_mode
, int unsignedp
)
4055 if (mode
!= wider_mode
)
4056 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
4058 if (!insn_operand_matches (icode
, opnum
, x
))
4060 machine_mode op_mode
= insn_data
[(int) icode
].operand
[opnum
].mode
;
4061 if (reload_completed
)
4063 if (GET_MODE (x
) != op_mode
&& GET_MODE (x
) != VOIDmode
)
4065 x
= copy_to_mode_reg (op_mode
, x
);
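/* Example (illustrative only): when a QImode comparison is widened to use
   a cbranchsi4 pattern, prepare_operand converts each side to SImode and,
   if the operand predicate still rejects it, copies it into a fresh pseudo:

     rtx op0 = prepare_operand (icode, x, 1, QImode, SImode, unsignedp);
     rtx op1 = prepare_operand (icode, y, 2, QImode, SImode, unsignedp);
     // Each result is either NULL_RTX (after reload, or on a mode
     // mismatch) or an rtx accepted by the insn's operand predicate.  */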
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the branch.  */

static void
emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
                          profile_probability prob)
{
  machine_mode optab_mode;
  enum mode_class mclass;
  enum insn_code icode;
  rtx_insn *insn;

  mclass = GET_MODE_CLASS (mode);
  optab_mode = (mclass == MODE_CC) ? CCmode : mode;
  icode = optab_handler (cbranch_optab, optab_mode);

  gcc_assert (icode != CODE_FOR_nothing);
  gcc_assert (insn_operand_matches (icode, 0, test));
  insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
                                          XEXP (test, 1), label));
  if (prob.initialized_p ()
      && profile_status_for_fn (cfun) != PROFILE_ABSENT
      && insn
      && JUMP_P (insn)
      && any_condjump_p (insn)
      && !find_reg_note (insn, REG_BR_PROB, 0))
    add_reg_br_prob_note (insn, prob);
}
/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened.  UNSIGNEDP is also used to select the proper
   branch condition code.

   If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.

   MODE is the mode of the inputs (in case they are const_int).

   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   It will be potentially converted into an unsigned variant based on
   UNSIGNEDP to select a proper jump instruction.

   PROB is the probability of jumping to LABEL.  */

void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
                         machine_mode mode, int unsignedp, rtx label,
                         profile_probability prob)
{
  rtx op0 = x, op1 = y;
  rtx test;

  /* Swap operands and condition to ensure canonical RTL.  */
  if (swap_commutative_operands_p (x, y)
      && can_compare_p (swap_condition (comparison), mode, ccp_jump))
    {
      op0 = y, op1 = x;
      comparison = swap_condition (comparison);
    }

  /* If OP0 is still a constant, then both X and Y must be constants
     or the opposite comparison is not supported.  Force X into a register
     to create canonical RTL.  */
  if (CONSTANT_P (op0))
    op0 = force_reg (mode, op0);

  if (unsignedp)
    comparison = unsigned_condition (comparison);

  prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
                    &test, &mode);
  emit_cmp_and_jump_insn_1 (test, mode, label, prob);
}
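/* Usage sketch (illustrative): branch to LABEL when pseudo X exceeds
   pseudo Y as signed SImode values, marking the branch unlikely:

     emit_cmp_and_jump_insns (x, y, GT, NULL_RTX, SImode, 0, label,
                              profile_probability::unlikely ());

   The canonicalization above means that a call with a constant first
   operand, e.g. (GT, const0_rtx, y), is rewritten as (LT, y, const0_rtx)
   when the target can branch on the swapped form.  */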
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
                       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();

  FOR_EACH_MODE_FROM (mode, orig_mode)
    {
      if (code_to_optab (comparison)
          && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
        break;

      if (code_to_optab (swapped)
          && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
        {
          std::swap (x, y);
          comparison = swapped;
          break;
        }

      if (code_to_optab (reversed)
          && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
        {
          comparison = reversed;
          reversed_p = true;
          break;
        }
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   cmp_mode, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
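/* For illustration (a sketch of typical soft-float output, not code from
   this file): an SFmode "a < b" usually becomes a call to __ltsf2, whose
   result is then tested against zero:

     t = __ltsf2 (a, b);    // negative iff a < b, per the libgcc convention
     if (t < 0) ...         // *ptest is (lt (reg t) (const_int 0))

   UNORDERED instead calls __unordsf2, which returns a boolean, so the
   code above builds the NE-against-zero (or EQ, if reversed) form.  */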
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!targetm.have_indirect_jump ())
    sorry ("indirect jumps are not available on this target");
  else
    {
      struct expand_operand ops[1];
      create_address_operand (&ops[0], loc);
      expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
      emit_barrier ();
    }
}
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       machine_mode cmode, rtx op2, rtx op3,
                       machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If the two source operands are identical, that's just a move.  */

  if (rtx_equal_p (op2, op3))
    {
      if (!target)
        target = gen_reg_rtx (mode);

      emit_move_insn (target, op3);
      return target;
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  enum rtx_code orig_code = code;
  bool swapped = false;
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      std::swap (op2, op3);
      code = reversed;
      swapped = true;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  for (int pass = 0; ; pass++)
    {
      code = unsignedp ? unsigned_condition (code) : code;
      comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

      /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
         punt and let the caller figure out how best to deal with this
         situation.  */
      if (COMPARISON_P (comparison))
        {
          saved_pending_stack_adjust save;
          save_pending_stack_adjust (&save);
          last = get_last_insn ();
          do_pending_stack_adjust ();
          machine_mode cmpmode = cmode;
          prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                            GET_CODE (comparison), NULL_RTX, unsignedp,
                            OPTAB_WIDEN, &comparison, &cmpmode);
          if (comparison)
            {
              struct expand_operand ops[4];

              create_output_operand (&ops[0], target, mode);
              create_fixed_operand (&ops[1], comparison);
              create_input_operand (&ops[2], op2, mode);
              create_input_operand (&ops[3], op3, mode);
              if (maybe_expand_insn (icode, 4, ops))
                {
                  if (ops[0].value != target)
                    convert_move (target, ops[0].value, false);
                  return target;
                }
            }
          delete_insns_since (last);
          restore_pending_stack_adjust (&save);
        }

      if (pass == 1)
        return NULL_RTX;

      /* If the preferred op2/op3 order is not usable, retry with other
         operand order, perhaps it will expand successfully.  */
      if (swapped)
        code = orig_code;
      else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
                                                           NULL))
               != UNKNOWN)
        code = reversed;
      else
        return NULL_RTX;
      std::swap (op2, op3);
    }
}
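/* Usage sketch (illustrative): expanding MAX (a, b) through a conditional
   move, assuming the target provides a movsicc pattern:

     rtx res = emit_conditional_move (NULL_RTX, GT, a, b, SImode,
                                      a, b, SImode, 0);
     // res holds (a > b ? a : b), or is NULL_RTX if movcc is unavailable
     // for SImode and both operand orders failed to expand.  */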
/* Emit a conditional negate or bitwise complement using the
   negcc or notcc optabs if available.  Return NULL_RTX if such operations
   are not available.  Otherwise return the RTX holding the result.
   TARGET is the desired destination of the result.  COMP is the comparison
   on which to negate.  If COND is true move into TARGET the negation
   or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
   CODE is either NEG or NOT.  MODE is the machine mode in which the
   operation is performed.  */

rtx
emit_conditional_neg_or_complement (rtx target, rtx_code code,
                                    machine_mode mode, rtx cond, rtx op1,
                                    rtx op2)
{
  optab op = unknown_optab;
  if (code == NEG)
    op = negcc_optab;
  else if (code == NOT)
    op = notcc_optab;
  else
    gcc_unreachable ();

  insn_code icode = direct_optab_handler (op, mode);

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  if (!target)
    target = gen_reg_rtx (mode);

  rtx_insn *last = get_last_insn ();
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, mode);
  create_fixed_operand (&ops[1], cond);
  create_input_operand (&ops[2], op1, mode);
  create_input_operand (&ops[3], op2, mode);

  if (maybe_expand_insn (icode, 4, ops))
    {
      if (ops[0].value != target)
        convert_move (target, ops[0].value, false);

      return target;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      machine_mode cmode, rtx op2, rtx op3,
                      machine_mode mode, int unsignedp)
{
  rtx comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
                    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
        {
          if (ops[0].value != target)
            convert_move (target, ops[0].value, false);
          return target;
        }
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_add2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (add_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}

/* Generate and return an insn body to add Y to X.  */

rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, y));
  gcc_assert (insn_operand_matches (icode, 2, z));

  return GEN_FCN (icode) (x, y, z);
}

/* Return true if the target implements an addptr pattern and X, Y,
   and Z are valid for the pattern predicates.  */

int
have_addptr3_insn (rtx x, rtx y, rtx z)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (addptr3_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, y)
      || !insn_operand_matches (icode, 2, z))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));

  gcc_assert (insn_operand_matches (icode, 0, x));
  gcc_assert (insn_operand_matches (icode, 1, x));
  gcc_assert (insn_operand_matches (icode, 2, y));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.  */

rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));

  if (icode == CODE_FOR_nothing
      || !insn_operand_matches (icode, 0, r0)
      || !insn_operand_matches (icode, 1, r1)
      || !insn_operand_matches (icode, 2, c))
    return NULL;

  return GEN_FCN (icode) (r0, r1, c);
}

int
have_sub2_insn (rtx x, rtx y)
{
  enum insn_code icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = optab_handler (sub_optab, GET_MODE (x));

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!insn_operand_matches (icode, 0, x)
      || !insn_operand_matches (icode, 1, x)
      || !insn_operand_matches (icode, 2, y))
    return 0;

  return 1;
}
/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
                 machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  scalar_mode from_mode, to_mode;
  machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
    FOR_EACH_MODE_FROM (imode, GET_MODE (from))
      {
        int doing_unsigned = unsignedp;

        if (fmode != GET_MODE (to)
            && (significand_size (fmode)
                < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp
      && can_do_signed
      && is_a <scalar_mode> (GET_MODE (to), &to_mode)
      && is_a <scalar_mode> (GET_MODE (from), &from_mode))
    {
      opt_scalar_mode fmode_iter;
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      FOR_EACH_MODE_FROM (fmode_iter, to_mode)
        {
          scalar_mode fmode = fmode_iter.require ();
          if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
              && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
            break;
        }

      if (!fmode_iter.exists (&fmode))
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = to_mode;

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_PRECISION (from_mode))
            {
              rtx temp1;
              rtx_code_label *neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = from_mode;
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (targetm.gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (to_mode != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
                               0, label);

      real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
      temp = expand_binop (fmode, add_optab, target,
                           const_double_from_real_value (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      if (is_narrower_int_mode (GET_MODE (from), SImode))
        from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
                                         GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
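/* The halve-and-double trick above, in C terms (illustrative sketch for
   converting a 64-bit unsigned value U when only signed conversions are
   available and no wider float mode exists):

     if ((int64_t) u >= 0)
       f = (float) (int64_t) u;          // sign bit clear: plain signed
     else
       {
         uint64_t t = (u >> 1) | (u & 1);  // halve, keep the sticky bit
         f = (float) (int64_t) t;
         f = f + f;                        // undo the halving
       }

   OR-ing the shifted-out bit back in keeps the final rounding correct.  */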
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  opt_scalar_mode fmode_iter;
  bool must_trunc = false;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (maybe_emit_unop_insn (icode, target, from,
                                      doing_unsigned ? UNSIGNED_FIX : FIX))
              {
                if (target != to)
                  convert_move (to, target, unsignedp);
                return;
              }
            delete_insns_since (last);
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance, conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  scalar_int_mode to_mode;
  if (unsignedp
      && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
      && HWI_COMPUTABLE_MODE_P (to_mode))
    FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
      {
        scalar_mode fmode = fmode_iter.require ();
        if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
                                           0, &must_trunc)
            && (!DECIMAL_FLOAT_MODE_P (fmode)
                || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
          {
            int bitsize;
            REAL_VALUE_TYPE offset;
            rtx limit;
            rtx_code_label *lab1, *lab2;
            rtx_insn *insn;

            bitsize = GET_MODE_PRECISION (to_mode);
            real_2expN (&offset, bitsize - 1, fmode);
            limit = const_double_from_real_value (offset, fmode);
            lab1 = gen_label_rtx ();
            lab2 = gen_label_rtx ();

            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            /* See if we need to do the subtraction.  */
            do_pending_stack_adjust ();
            emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
                                     GET_MODE (from), 0, lab1);

            /* If not, do the signed "fix" and branch around fixup code.  */
            expand_fix (to, from, 0);
            emit_jump_insn (targetm.gen_jump (lab2));
            emit_barrier ();

            /* Otherwise, subtract 2**(N-1), convert to signed number,
               then add 2**(N-1).  Do the addition using XOR since this
               will often generate better code.  */
            emit_label (lab1);
            target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                   NULL_RTX, 0, OPTAB_LIB_WIDEN);
            expand_fix (to, target, 0);
            target = expand_binop (to_mode, xor_optab, to,
                                   gen_int_mode
                                   (HOST_WIDE_INT_1 << (bitsize - 1),
                                    to_mode),
                                   to, 1, OPTAB_LIB_WIDEN);

            if (target != to)
              emit_move_insn (to, target);

            emit_label (lab2);

            if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
              {
                /* Make a place for a REG_NOTE and add it.  */
                insn = emit_move_insn (to, to);
                set_dst_reg_note (insn, REG_EQUAL,
                                  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
                                                 copy_rtx (from)),
                                  to);
              }

            return;
          }
      }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (is_narrower_int_mode (GET_MODE (to), SImode))
    {
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), from, GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
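/* The 2**(N-1) fixup above corresponds to this C-level sketch for an
   unsigned 64-bit result when only the signed fix exists (illustrative):

     if (x < 9223372036854775808.0)            // 2^63
       r = (uint64_t) (int64_t) x;             // hot path: plain signed fix
     else
       r = (uint64_t) (int64_t) (x - 9223372036854775808.0)
           ^ 0x8000000000000000ULL;            // add 2^63 back via XOR

   Subtracting 2^63 only clears the top mantissa bit, so the slow path
   introduces no rounding.  */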
/* Promote integer arguments for a libcall if necessary.
   emit_library_call_value cannot do the promotion because it does not
   know if it should do a signed or unsigned promotion.  This is because
   there are no tree types defined for libcalls.  */

static rtx
prepare_libcall_arg (rtx arg, int uintp)
{
  scalar_int_mode mode;
  machine_mode arg_mode;
  if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
    {
      /* If we need to promote the integer function argument we need to do
         it here instead of inside emit_library_call_value because in
         emit_library_call_value we don't know if we should do a signed or
         unsigned promotion.  */

      int unsigned_p = uintp;
      arg_mode = promote_function_mode (NULL_TREE, mode,
                                        &unsigned_p, NULL_TREE, 0);
      if (arg_mode != mode)
        return convert_to_mode (arg_mode, arg, uintp);
    }
  return arg;
}
/* Generate code to convert FROM or TO a fixed-point.
   If UINTP is true, either TO or FROM is an unsigned integer.
   If SATP is true, we need to saturate the result.  */

void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  from = prepare_libcall_arg (from, uintp);
  from_mode = GET_MODE (from);

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
                                   from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
                      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point, TO must be signed.  Use the conversion optab
   TAB to do the conversion.  */

bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
    FOR_EACH_MODE_FROM (imode, GET_MODE (to))
      {
        icode = convert_optab_handler (tab, imode, fmode);
        if (icode != CODE_FOR_nothing)
          {
            rtx_insn *last = get_last_insn ();
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
              {
                delete_insns_since (last);
                continue;
              }
            if (target != to)
              convert_move (to, target, 0);
            return true;
          }
      }

  return false;
}
/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */

int
have_insn_for (enum rtx_code code, machine_mode mode)
{
  return (code_to_optab (code)
          && (optab_handler (code_to_optab (code), mode)
              != CODE_FOR_nothing));
}
/* Print information about the current contents of the optabs on
   STDERR.  */

DEBUG_FUNCTION void
debug_optab_libfuncs (void)
{
  int i, j, k;

  /* Dump the arithmetic optabs.  */
  for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      {
        rtx l = optab_libfunc ((optab) i, (machine_mode) j);
        if (l)
          {
            gcc_assert (GET_CODE (l) == SYMBOL_REF);
            fprintf (stderr, "%s\t%s:\t%s\n",
                     GET_RTX_NAME (optab_to_code ((optab) i)),
                     GET_MODE_NAME (j),
                     XSTR (l, 0));
          }
      }

  /* Dump the conversion optabs.  */
  for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
    for (j = 0; j < NUM_MACHINE_MODES; ++j)
      for (k = 0; k < NUM_MACHINE_MODES; ++k)
        {
          rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
                                         (machine_mode) k);
          if (l)
            {
              gcc_assert (GET_CODE (l) == SYMBOL_REF);
              fprintf (stderr, "%s\t%s\t%s:\t%s\n",
                       GET_RTX_NAME (optab_to_code ((optab) i)),
                       GET_MODE_NAME (j),
                       GET_MODE_NAME (k),
                       XSTR (l, 0));
            }
        }
}
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
   CODE.  Return 0 on failure.  */

rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx_insn *insn;
  rtx trap_rtx;

  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
                    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
                            tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  insn = get_insns ();
  end_sequence ();
  return insn;
}
/* Return rtx code for TCODE.  Use UNSIGNEDP to select signed
   or unsigned operation code.  */

enum rtx_code
get_rtx_code (enum tree_code tcode, bool unsignedp)
{
  enum rtx_code code;
  switch (tcode)
    {
    case EQ_EXPR:
      code = EQ;
      break;
    case NE_EXPR:
      code = NE;
      break;
    case LT_EXPR:
      code = unsignedp ? LTU : LT;
      break;
    case LE_EXPR:
      code = unsignedp ? LEU : LE;
      break;
    case GT_EXPR:
      code = unsignedp ? GTU : GT;
      break;
    case GE_EXPR:
      code = unsignedp ? GEU : GE;
      break;
    case UNORDERED_EXPR:
      code = UNORDERED;
      break;
    case ORDERED_EXPR:
      code = ORDERED;
      break;
    case UNEQ_EXPR:
      code = UNEQ;
      break;
    case UNLT_EXPR:
      code = UNLT;
      break;
    case UNLE_EXPR:
      code = UNLE;
      break;
    case UNGT_EXPR:
      code = UNGT;
      break;
    case UNGE_EXPR:
      code = UNGE;
      break;
    case LTGT_EXPR:
      code = LTGT;
      break;

    case BIT_AND_EXPR:
      code = AND;
      break;

    case BIT_IOR_EXPR:
      code = IOR;
      break;

    default:
      gcc_unreachable ();
    }
  return code;
}
/* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
   select signed or unsigned operators.  OPNO holds the index of the
   first comparison operand for insn ICODE.  Do not generate the
   compare instruction itself.  */

static rtx
vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
                    tree t_op0, tree t_op1, bool unsignedp,
                    enum insn_code icode, unsigned int opno)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  machine_mode m0, m1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
     has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
     cases, use the original mode.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
                         EXPAND_STACK_PARM);
  m0 = GET_MODE (rtx_op0);
  if (m0 == VOIDmode)
    m0 = TYPE_MODE (TREE_TYPE (t_op0));

  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
                         EXPAND_STACK_PARM);
  m1 = GET_MODE (rtx_op1);
  if (m1 == VOIDmode)
    m1 = TYPE_MODE (TREE_TYPE (t_op1));

  create_input_operand (&ops[0], rtx_op0, m0);
  create_input_operand (&ops[1], rtx_op1, m1);
  if (!maybe_legitimize_operands (icode, opno, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
}
/* Check if vec_perm mask SEL is a constant equivalent to a shift of
   the first vec_perm operand, assuming the second operand is a constant
   vector of zeros.  Return the shift distance in bits if so, or NULL_RTX
   if the vec_perm is not a shift.  MODE is the mode of the value being
   shifted.  */

static rtx
shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel)
{
  unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
  poly_int64 first = sel[0];
  if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
    return NULL_RTX;

  if (!sel.series_p (0, 1, first, 1))
    {
      unsigned int nelt;
      if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
        return NULL_RTX;
      for (unsigned int i = 1; i < nelt; i++)
        {
          poly_int64 expected = i + first;
          /* Indices into the second vector are all equivalent.  */
          if (maybe_lt (sel[i], nelt)
              ? maybe_ne (sel[i], expected)
              : maybe_lt (expected, nelt))
            return NULL_RTX;
        }
    }

  return gen_int_shift_amount (mode, first * bitsize);
}
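/* Example (illustrative): for a V4SI permutation whose second operand is
   all zeros, the selector {1, 2, 3, 4} selects elements 1..3 of the first
   vector followed by one zero element, i.e. a whole-vector shift by one
   32-bit element, so the function returns a 32-bit shift amount.  A
   non-monotonic selector such as {1, 2, 3, 0} returns NULL_RTX.  */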
/* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */

static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
                   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
              || mode_for_int_vector (tmode).require () == smode);
  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
        v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
   is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
   to have a particular mode.  */

rtx
expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
                       const vec_perm_builder &sel, machine_mode sel_mode,
                       rtx target)
{
  if (!target || !register_operand (target, mode))
    target = gen_reg_rtx (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    qimode = VOIDmode;

  rtx_insn *last = get_last_insn ();

  bool single_arg_p = rtx_equal_p (v0, v1);
  /* Always specify two input vectors here and leave the target to handle
     cases in which the inputs are equal.  Not all backends can cope with
     the single-input representation when testing for a double-input
     target instruction.  */
  vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));

  /* See if this can be handled with a vec_shr.  We only do this if the
     second vector is all zeroes.  */
  insn_code shift_code = optab_handler (vec_shr_optab, mode);
  insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
                             ? optab_handler (vec_shr_optab, qimode)
                             : CODE_FOR_nothing);

  if (v1 == CONST0_RTX (GET_MODE (v1))
      && (shift_code != CODE_FOR_nothing
          || shift_code_qi != CODE_FOR_nothing))
    {
      rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices);
      if (shift_amt)
        {
          struct expand_operand ops[3];
          if (shift_code != CODE_FOR_nothing)
            {
              create_output_operand (&ops[0], target, mode);
              create_input_operand (&ops[1], v0, mode);
              create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
              if (maybe_expand_insn (shift_code, 3, ops))
                return ops[0].value;
            }
          if (shift_code_qi != CODE_FOR_nothing)
            {
              rtx tmp = gen_reg_rtx (qimode);
              create_output_operand (&ops[0], tmp, qimode);
              create_input_operand (&ops[1], gen_lowpart (qimode, v0), qimode);
              create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
              if (maybe_expand_insn (shift_code_qi, 3, ops))
                return gen_lowpart (mode, ops[0].value);
            }
        }
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      v0 = force_reg (mode, v0);
      if (single_arg_p)
        v1 = v0;
      else
        v1 = force_reg (mode, v1);

      if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
        return target;
    }

  /* Fall back to a constant byte-based permutation.  */
  vec_perm_indices qimode_indices;
  rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
  if (qimode != VOIDmode)
    {
      qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
      target_qi = gen_reg_rtx (qimode);
      v0_qi = gen_lowpart (qimode, v0);
      v1_qi = gen_lowpart (qimode, v1);
      if (targetm.vectorize.vec_perm_const != NULL
          && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
                                               v1_qi, qimode_indices))
        return gen_lowpart (mode, target_qi);
    }

  /* Otherwise expand as a fully variable permutation.  */

  /* The optabs are only defined for selectors with the same width
     as the values being permuted.  */
  machine_mode required_sel_mode;
  if (!mode_for_int_vector (mode).exists (&required_sel_mode)
      || !VECTOR_MODE_P (required_sel_mode))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  /* We know that it is semantically valid to treat SEL as having SEL_MODE.
     If that isn't the mode we want then we need to prove that using
     REQUIRED_SEL_MODE is OK.  */
  if (sel_mode != required_sel_mode)
    {
      if (!selector_fits_mode_p (required_sel_mode, indices))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
      sel_mode = required_sel_mode;
    }

  insn_code icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
      rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
      if (tmp)
        return tmp;
    }

  if (qimode != VOIDmode
      && selector_fits_mode_p (qimode, qimode_indices))
    {
      icode = direct_optab_handler (vec_perm_optab, qimode);
      if (icode != CODE_FOR_nothing)
        {
          rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
          rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
          if (tmp)
            return gen_lowpart (mode, tmp);
        }
    }

  delete_insns_since (last);
  return NULL_RTX;
}
/* Implement a permutation of vectors v0 and v1 using the permutation
   vector in SEL and return the result.  Use TARGET to hold the result
   if nonnull and convenient.

   MODE is the mode of the vectors being permuted (V0 and V1).
   SEL must have the integer equivalent of MODE and is known to be
   unsuitable for permutes with a constant permutation vector.  */

rtx
expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  unsigned int i, u;
  rtx tmp, sel_qi;

  u = GET_MODE_UNIT_SIZE (mode);

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
        return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* Multiply each element by its byte size.  */
  machine_mode selmode = GET_MODE (sel);
  if (u == 2)
    sel = expand_simple_binop (selmode, PLUS, sel, sel,
                               NULL, 0, OPTAB_DIRECT);
  else
    sel = expand_simple_binop (selmode, ASHIFT, sel,
                               gen_int_shift_amount (selmode, exact_log2 (u)),
                               NULL, 0, OPTAB_DIRECT);
  gcc_assert (sel != NULL);

  /* Broadcast the low byte of each element into each of its bytes.
     The encoding has U interleaved stepped patterns, one for each
     byte of an element.  */
  vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
  unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
  for (i = 0; i < 3; ++i)
    for (unsigned int j = 0; j < u; ++j)
      const_sel.quick_push (i * u + low_byte_in_u);
  sel = gen_lowpart (qimode, sel);
  sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
  gcc_assert (sel != NULL);

  /* Add the byte offset to each byte element.  */
  /* Note that the definition of the indices here is memory ordering,
     so there should be no difference between big and little endian.  */
  rtx_vector_builder byte_indices (qimode, u, 1);
  for (i = 0; i < u; ++i)
    byte_indices.quick_push (GEN_INT (i));
  tmp = byte_indices.build ();
  sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
                                sel, 0, OPTAB_DIRECT);
  gcc_assert (sel_qi != NULL);

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
                           gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
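/* Worked example (illustrative): lowering a V4SI variable permute to a
   V16QI permute, so U == 4.  For the element selector {2, 0, 3, 1} on a
   little-endian target:

     sel << 2            -> {8, 0, 12, 4}          // multiply by byte size
     broadcast low bytes -> {8,8,8,8, 0,0,0,0, 12,12,12,12, 4,4,4,4}
     + {0,1,2,3, 0,1,2,3, ...}
                         -> {8,9,10,11, 0,1,2,3, 12,13,14,15, 4,5,6,7}

   which is exactly the byte-level selector for the same permutation.  */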
/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
                           rtx target)
{
  struct expand_operand ops[4];
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
  enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
  rtx mask, rtx_op1, rtx_op2;

  if (icode == CODE_FOR_nothing)
    return 0;

  mask = expand_normal (op0);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  mask = force_reg (mask_mode, mask);
  rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_input_operand (&ops[3], mask, mask_mode);
  expand_insn (icode, 4, ops);

  return ops[0].value;
}
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
   three operands.  */

rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
                      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
      if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
          != CODE_FOR_nothing)
        return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
                                          op2, target);
      /* Fake op0 < 0.  */
      else
        {
          gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
                      == MODE_VECTOR_INT);
          op0a = op0;
          op0b = build_zero_cst (TREE_TYPE (op0));
          tcode = LT_EXPR;
        }
    }
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));

  gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
              && known_eq (GET_MODE_NUNITS (mode),
                           GET_MODE_NUNITS (cmp_op_mode)));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
        icode = get_vcond_eq_icode (mode, cmp_op_mode);
      if (icode == CODE_FOR_nothing)
        return 0;
    }

  comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
                                   icode, 4);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
/* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
   Use TARGET for the result if nonnull and convenient.  */

rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
  struct expand_operand ops[3];
  enum insn_code icode;
  machine_mode emode = GET_MODE_INNER (vmode);

  icode = direct_optab_handler (vec_series_optab, vmode);
  gcc_assert (icode != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, vmode);
  create_input_operand (&ops[1], op0, emode);
  create_input_operand (&ops[2], op1, emode);

  expand_insn (icode, 3, ops);
  return ops[0].value;
}
/* Generate insns for a vector comparison into a mask.  */

rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
  struct expand_operand ops[4];
  enum insn_code icode;
  rtx comparison;
  machine_mode mask_mode = TYPE_MODE (type);
  machine_mode vmode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  op0a = TREE_OPERAND (exp, 0);
  op0b = TREE_OPERAND (exp, 1);
  tcode = TREE_CODE (exp);

  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  vmode = TYPE_MODE (TREE_TYPE (op0a));

  icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    {
      if (tcode == EQ_EXPR || tcode == NE_EXPR)
        icode = get_vec_cmp_eq_icode (vmode, mask_mode);
      if (icode == CODE_FOR_nothing)
        return 0;
    }

  comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
                                   unsignedp, icode, 2);
  create_output_operand (&ops[0], target, mask_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], XEXP (comparison, 0));
  create_fixed_operand (&ops[3], XEXP (comparison, 1));
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Expand a highpart multiply.  */

rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
                      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i;
  machine_mode wmode;
  rtx m1, m2;
  optab tab1, tab2;

  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
                           OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      if (BYTES_BIG_ENDIAN)
        std::swap (tab1, tab2);
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
                                 GET_MODE_NUNITS (mode)));
  gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  vec_perm_builder sel;
  if (method == 2)
    {
      /* The encoding has 2 interleaved stepped patterns.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
      for (i = 0; i < 6; ++i)
        sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
                        + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
    }
  else
    {
      /* The encoding has a single interleaved stepped pattern.  */
      sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
      for (i = 0; i < 3; ++i)
        sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }

  return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
}
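/* For illustration: with method 3 on a little-endian V4SI target, the
   lo/hi widening multiplies yield two V2DI vectors

     m1 = { a0*b0, a1*b1 }   (lo)      m2 = { a2*b2, a3*b3 }   (hi)

   and each 64-bit product, viewed as V4SI, is a (low, high) pair.  The
   selector {1, 3, 5, 7} built above therefore picks the high half of
   every product from the concatenation of M1 and M2, producing the V4SI
   highpart vector.  */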
/* Helper function to find the MODE_CC set in a sync_compare_and_swap
   pattern.  */

static void
find_cc_set (rtx x, const_rtx pat, void *data)
{
  if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
      && GET_CODE (pat) == SET)
    {
      rtx *p_cc_reg = (rtx *) data;
      gcc_assert (!*p_cc_reg);
      *p_cc_reg = x;
    }
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
        if (!success)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
                                       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
                                       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
                           GET_MODE (success), 1, label,
                           profile_probability::guessed_never ());
  return true;
}
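/* C-level equivalent of the generated loop (illustrative sketch) for an
   atomic read-modify-write computing f(old):

     old = *mem;                          // single plain load
     do
       new = f (old);                     // the SEQ instructions
     while (!atomic_compare_exchange (mem, &old, new));

   A failed compare-and-swap refreshes OLD with the value it observed, so
   the loop never re-reads MEM directly.  */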
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */

static rtx
maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the exchange directly, great.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      create_integer_operand (&ops[3], model);
      if (maybe_expand_insn (icode, 4, ops))
        return ops[0].value;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
                                   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
        return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
        {
          rtx addr;

          addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
          return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
                                          mode, addr, ptr_mode,
                                          val, mode);
        }
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
/* This function expands the legacy __sync_lock_test_and_set operation, which
   is generally an atomic exchange.  Some limited targets only allow the
   constant 1 to be stored.  This is an ACQUIRE operation.

   TARGET is an optional place to stick the return value.
   MEM is where VAL is stored.  */

rtx
expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
{
  rtx ret;

  /* Try an atomic_exchange first.  */
  ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
					   MEMMODEL_SYNC_ACQUIRE);
  if (ret)
    return ret;

  ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
  if (ret)
    return ret;

  /* If there are no other options, try atomic_test_and_set if the value
     being stored is 1.  */
  if (val == const1_rtx)
    ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);

  return ret;
}
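/* Worked fallback chain (illustrative): for __sync_lock_test_and_set
   (&flag, 1) on a target providing none of atomic_exchange,
   sync_lock_test_and_set, or inline compare-and-swap, only the final
   maybe_emit_atomic_test_and_set attempt remains, and it is tried
   precisely because the stored value is const1_rtx.  */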
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ...  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
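/* Example of the trueval rectification above (illustrative; the value 0xff
   is hypothetical): if a target defines atomic_test_and_set_trueval as
   0xff, a successful exchange leaves 0xff or 0 in RET, and

     emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1)

   normalizes that into the 0-or-1 boolean the builtin must return.  */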
/* This function expands the atomic exchange operation:
   atomically store VAL in MEM and return the previous value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  ret = maybe_emit_atomic_exchange (target, mem, val, model);

  /* Next try a compare-and-swap loop for the exchange.  */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);

  return ret;
}
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
				       expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
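/* Usage sketch (illustrative, with hypothetical locals): a caller expanding
   a strong compare-exchange and wanting both results might write

     rtx bool_ret = NULL_RTX, oldval = NULL_RTX;
     if (expand_atomic_compare_and_swap (&bool_ret, &oldval, mem, expected,
					 desired, false, MEMMODEL_SEQ_CST,
					 MEMMODEL_SEQ_CST))
       ... use bool_ret and oldval ...

   remembering that, per the contract above, both pointers may be redirected
   to new rtx values on success.  */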
/* Generate asm volatile("" : : : "memory") as the memory blockage.  */

static void
expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
/* Do not propagate memory accesses across this point.  */

static void
expand_memory_blockage (void)
{
  if (targetm.have_memory_blockage ())
    emit_insn (targetm.gen_memory_blockage ());
  else
    expand_asm_memory_blockage ();
}
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    expand_memory_blockage ();
}
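/* Behavior sketch (deduced from the code above, for illustration): a relaxed
   model emits nothing at all; a target with a mem_thread_fence pattern gets
   both the machine fence and a compiler-level blockage; and a target with no
   fence pattern, no memory_barrier pattern, and no synchronize libfunc still
   gets the compiler-only blockage so that memory accesses are not reordered
   at the RTL level.  */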
/* Emit a signal fence with given memory model.  */

void
expand_mem_signal_fence (enum memmodel model)
{
  /* No machine barrier is required to implement a signal fence, but
     a compiler memory barrier must be issued, except for relaxed MM.  */
  if (!is_mm_relaxed (model))
    expand_memory_blockage ();
}
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
	expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (!is_mm_relaxed (model))
	    expand_memory_blockage ();
	  return ops[0].value;
	}
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     doing so could result in would be incorrect if this is a volatile
     atomic load or targeting read-only-mapped memory.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
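/* Shape of the emitted fallback (illustrative): for a word-size seq-cst
   load the final path above produces approximately

     fence (seq_cst)    <- expand_mem_thread_fence before the load
     target = mem       <- plain move, assumed atomic at word size
     fence (seq_cst)    <- expand_mem_thread_fence after the load

   while an acquire load keeps only the trailing fence.  */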
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   The function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
	expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (is_mm_seq_cst (model))
	    expand_memory_blockage ();
	  return const0_rtx;
	}
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
	 we can try an atomic_exchange and throw away the result.  Otherwise,
	 don't do anything so that we do not create an inconsistency between
	 loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
	{
	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
	  if (!target)
	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
								val);
	  if (target)
	    return const0_rtx;
	}
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
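/* Shape of the emitted fallback (illustrative): the final path above mirrors
   expand_atomic_load, producing for a seq-cst store approximately

     fence (seq_cst)    <- before the store, for any non-relaxed model
     mem = val          <- plain move, assumed atomic at word size
     fence (seq_cst)    <- only for seq-cst

   so that loads and stores of the same size stay consistent.  */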
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;
  direct_optab mem_fetch_after;
  direct_optab mem_no_result;
  optab fetch_before;
  optab fetch_after;
  direct_optab no_result;
  enum rtx_code reverse_code;
};
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
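/* Example (a sketch, not from the source): expanding __atomic_fetch_add
   begins with

     struct atomic_op_functions optab;
     get_atomic_op_for_code (&optab, PLUS);

   after which optab.mem_fetch_before is atomic_fetch_add_optab and
   optab.reverse_code is MINUS, so an add_fetch result can later be turned
   back into a fetch_add result by subtracting VAL.  */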
/* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation, otherwise the previous value.
   TARGET is an optional place to place the result.  The result is unused if
   it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
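/* The identities relied on above, spelled out:

     fetch_and (&x, 0, m)   stores 0 and returns the old x, exactly like
			    exchange (&x, 0, m), since x & 0 == 0;
     fetch_or (&x, -1, m)   stores -1 and returns the old x, exactly like
			    exchange (&x, -1, m), since x | -1 == -1.

   Both replacements store the same new value and return the previous
   contents, so they are valid under any memory model.  */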
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab.  No compare and swap loops or libcalls will be generated.  */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
	return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
	return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model,
				!after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after == fetch_before OP val.
	     Fetch_before == fetch_after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
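/* Compensation example (illustrative): if the caller wants add_fetch
   (AFTER is true) but only a fetch_add pattern exists, the reversed attempt
   above succeeds with the pre-operation value, and

     result = expand_simple_binop (mode, PLUS, result, val, target,
				   true, OPTAB_LIB_WIDEN);

   recovers the post-operation value.  NAND needs the two-step AND/NOT form
   because it has no single reverse operation.  */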
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   Atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP).
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */

rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);
  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  rtx_insn *insn = get_insns ();
	  end_sequence ();
	  emit_insn (insn);
	  return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    addr, ptr_mode, val, mode);

	  /* The libcall implemented the other variant, so compensate.  */
	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
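/* Worked example of the add/sub inversion above (illustrative): with only
   an atomic_fetch_add pattern available, __atomic_fetch_sub (&x, 5, model)
   is expanded as fetch_add (&x, -5, model); the NEG is wrapped in
   start_sequence/end_sequence so it can be thrown away if the reversed
   expansion fails as well.  */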
/* Return true if OPERAND is suitable for operand number OPNO of
   instruction ICODE.  */

bool
insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
{
  return (!insn_data[(int) icode].operand[opno].predicate
	  || (insn_data[(int) icode].operand[opno].predicate
	      (operand, insn_data[(int) icode].operand[opno].mode)));
}
/* TARGET is a target of a multiword operation that we are going to
   implement as a series of word-mode operations.  Return true if
   TARGET is suitable for this purpose.  */

bool
valid_multiword_target_p (rtx target)
{
  machine_mode mode;
  int i, size;

  mode = GET_MODE (target);
  if (!GET_MODE_SIZE (mode).is_constant (&size))
    return false;
  for (i = 0; i < size; i += UNITS_PER_WORD)
    if (!validate_subreg (word_mode, mode, target, i))
      return false;
  return true;
}
/* Make OP describe an input operand that has value INTVAL and that has
   no inherent mode.  This function should only be used for operands that
   are always expand-time constants.  The backend may request that INTVAL
   be copied into a different kind of rtx, but it must specify the mode
   of that rtx if so.  */

void
create_integer_operand (struct expand_operand *op, poly_int64 intval)
{
  create_expand_operand (op, EXPAND_INTEGER,
			 gen_int_mode (intval, MAX_MODE_INT),
			 VOIDmode, false, intval);
}
/* Like maybe_legitimize_operand, but do not change the code of the
   current rtx value.  */

static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
				    struct expand_operand *op)
{
  /* See if the operand matches in its current form.  */
  if (insn_operand_matches (icode, opno, op->value))
    return true;

  /* If the operand is a memory whose address has no side effects,
     try forcing the address into a non-virtual pseudo register.
     The check for side effects is important because copy_to_mode_reg
     cannot handle things like auto-modified addresses.  */
  if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
    {
      rtx addr, mem;
      rtx_insn *last;

      mem = op->value;
      addr = XEXP (mem, 0);
      if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
	  && !side_effects_p (addr))
	{
	  machine_mode mode;

	  last = get_last_insn ();
	  mode = get_address_mode (mem);
	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
	  if (insn_operand_matches (icode, opno, mem))
	    {
	      op->value = mem;
	      return true;
	    }
	  delete_insns_since (last);
	}
    }

  return false;
}
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = gen_reg_rtx (mode);
      op->target = 0;
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
	{
	  op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
	  mode = imode;
	}
      goto input;

    case EXPAND_ADDRESS:
      op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
					  op->value);
      goto input;

    case EXPAND_INTEGER:
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode
	  && known_eq (trunc_int_for_mode (op->int_value, mode),
		       op->int_value))
	{
	  op->value = gen_int_mode (op->int_value, mode);
	  goto input;
	}
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
/* Make OP describe an input operand that should have the same value
   as VALUE, after any mode conversion that the target might request.
   TYPE is the type of VALUE.  */

void
create_convert_operand_from_type (struct expand_operand *op,
				  rtx value, tree type)
{
  create_convert_operand_from (op, value, TYPE_MODE (type),
			       TYPE_UNSIGNED (type));
}
/* Return true if the requirements on operands OP1 and OP2 of instruction
   ICODE are similar enough for the result of legitimizing OP1 to be
   reusable for OP2.  OPNO1 and OPNO2 are the operand numbers associated
   with OP1 and OP2 respectively.  */

static inline bool
can_reuse_operands_p (enum insn_code icode,
		      unsigned int opno1, unsigned int opno2,
		      const struct expand_operand *op1,
		      const struct expand_operand *op2)
{
  /* Check requirements that are common to all types.  */
  if (op1->type != op2->type
      || op1->mode != op2->mode
      || (insn_data[(int) icode].operand[opno1].mode
	  != insn_data[(int) icode].operand[opno2].mode))
    return false;

  /* Check the requirements for specific types.  */
  switch (op1->type)
    {
    case EXPAND_OUTPUT:
      /* Outputs must remain distinct.  */
      return false;

    case EXPAND_FIXED:
    case EXPAND_INPUT:
    case EXPAND_ADDRESS:
    case EXPAND_INTEGER:
      return true;

    case EXPAND_CONVERT_TO:
    case EXPAND_CONVERT_FROM:
      return op1->unsigned_p == op2->unsigned_p;
    }
  gcc_unreachable ();
}
/* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
   of instruction ICODE.  Return true on success, leaving the new operand
   values in the OPS themselves.  Emit no code on failure.  */

bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
			   unsigned int nops, struct expand_operand *ops)
{
  rtx_insn *last = get_last_insn ();
  rtx *orig_values = XALLOCAVEC (rtx, nops);
  for (unsigned int i = 0; i < nops; i++)
    {
      orig_values[i] = ops[i].value;

      /* First try reusing the result of an earlier legitimization.
	 This avoids duplicate rtl and ensures that tied operands
	 remain tied.

	 This search is linear, but NOPS is bounded at compile time
	 to a small number (currently a single digit).  */
      unsigned int j = 0;
      for (; j < i; ++j)
	if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
	    && rtx_equal_p (orig_values[j], orig_values[i])
	    && ops[j].value
	    && insn_operand_matches (icode, opno + i, ops[j].value))
	  {
	    ops[i].value = copy_rtx (ops[j].value);
	    break;
	  }

      /* Otherwise try legitimizing the operand on its own.  */
      if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
	{
	  delete_insns_since (last);
	  return false;
	}
    }
  return true;
}
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		struct expand_operand *ops)
{
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL;

  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value, ops[8].value);
    }
  gcc_unreachable ();
}
/* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return true on success and emit no code on failure.  */

bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
		   struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_insn (pat);
      return true;
    }
  return false;
}
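/* Usage sketch (illustrative; this is the pattern used throughout the
   atomic expanders above):

     struct expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_fixed_operand (&ops[1], mem);
     create_integer_operand (&ops[2], model);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;

   Legitimization may substitute new rtx values, so callers read the result
   from ops[0].value rather than assuming TARGET was used.  */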
/* Like maybe_expand_insn, but for jumps.  */

bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
			struct expand_operand *ops)
{
  rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
  if (pat)
    {
      emit_jump_insn (pat);
      return true;
    }
  return false;
}
/* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  */

void
expand_insn (enum insn_code icode, unsigned int nops,
	     struct expand_operand *ops)
{
  if (!maybe_expand_insn (icode, nops, ops))
    gcc_unreachable ();
}
/* Like expand_insn, but for jumps.  */

void
expand_jump_insn (enum insn_code icode, unsigned int nops,
		  struct expand_operand *ops)
{
  if (!maybe_expand_jump_insn (icode, nops, ops))
    gcc_unreachable ();
}